// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/clock_cache.h"

#ifndef SUPPORT_CLOCK_CACHE

namespace ROCKSDB_NAMESPACE {

std::shared_ptr<Cache> NewClockCache(
    size_t /*capacity*/, int /*num_shard_bits*/, bool /*strict_capacity_limit*/,
    CacheMetadataChargePolicy /*metadata_charge_policy*/) {
  // Clock cache not supported.
  return nullptr;
}

}  // namespace ROCKSDB_NAMESPACE

#else

#include <assert.h>

#include <atomic>
#include <deque>

// "tbb/concurrent_hash_map.h" requires RTTI if exceptions are enabled.
// Disable it so users can choose to disable RTTI.
#ifndef ROCKSDB_USE_RTTI
#define TBB_USE_EXCEPTIONS 0
#endif
#include "cache/sharded_cache.h"
#include "port/lang.h"
#include "port/malloc.h"
#include "port/port.h"
#include "tbb/concurrent_hash_map.h"
#include "util/autovector.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// An implementation of the Cache interface based on the CLOCK algorithm, with
// better concurrent performance than LRUCache. The idea of the CLOCK algorithm
// is to maintain all cache entries in a circular list, with an iterator
// (the "head") pointing to the last examined entry. Eviction starts from the
// current head. Each entry is given a second chance before eviction if it
// has been accessed since it was last examined. In contrast to LRU, no
// modification to the internal data structure (except for flipping the usage
// bit) needs to be done upon lookup. This gives us the opportunity to
// implement a cache with better concurrency.
//
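// As an illustration only (not part of the implementation, and using
// hypothetical names), a bare-bones second-chance sweep over such a circular
// list looks roughly like this:
//
//   struct Entry { bool usage_bit; /* ... */ };
//   size_t head = 0;
//   Entry* PickVictim(std::vector<Entry*>& ring) {
//     while (true) {
//       Entry* e = ring[head];
//       head = (head + 1) % ring.size();
//       if (e->usage_bit) {
//         e->usage_bit = false;  // recently used: clear the bit, second chance
//       } else {
//         return e;              // not used since last examined: evict this one
//       }
//     }
//   }
//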
// Each cache entry is represented by a cache handle, and all the handles
// are arranged in a circular list, as described above. Upon erase of an
// entry, we never remove the handle. Instead, the handle is put into a
// recycle bin to be re-used. This avoids memory deallocation, which is hard
// to deal with in a concurrent environment.
//
// The cache also maintains a concurrent hash map for lookup. Any concurrent
// hash map implementation should do the job. We currently use
// tbb::concurrent_hash_map because it supports concurrent erase.
//
// Each cache handle has the following flags and counters, which are squeezed
// into an atomic integer, to make sure the handle is always in a consistent
// state:
//
// * In-cache bit: whether the entry is referenced by the cache itself. If
//   an entry is in cache, its key would also be available in the hash map.
// * Usage bit: whether the entry has been accessed by a user since it was
//   last examined for eviction. Can be reset by eviction.
// * Reference count: reference count held by users.
//
// An entry can be referenced only when it's in cache. An entry can be evicted
// only when it is in cache, has not been used since it was last examined, and
// its reference count is zero.
//
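// To make the packing concrete, here is an illustrative decode of the three
// pieces of state from the single 32-bit word (mirroring the kInCacheBit,
// kUsageBit and kRefsOffset constants defined in ClockCacheShard below):
//
//   uint32_t flags = handle->flags.load(std::memory_order_relaxed);
//   bool in_cache = (flags & 1u) != 0;   // lowest bit
//   bool usage = (flags & 2u) != 0;      // second lowest bit
//   uint32_t refs = flags >> 2;          // remaining bits
//   // Taking one reference is a single atomic add of (1u << 2) to the word.
//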
// The following figure shows a possible layout of the cache. Boxes represent
// cache handles, and the numbers in each box are the in-cache bit, usage bit,
// and reference count, respectively.
//
//   hash map:
//   +-------+--------+
//   |  key  | handle |
//   +-------+--------+
//   | "foo" |    5   |----------------------------------+
//   +-------+--------+                                  |
//   | "bar" |    2   |--+                               |
//   +-------+--------+  |                               |
//                       |                               |
//                  head |                               |
//                   |   |                               |
//   circular list:  |   |                               |
//   +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//   |(0,0,0)|---|(1,1,0)|---|(0,0,0)|---|(0,1,3)|---|(1,0,0)|---|  ...
//   +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//       |                       |
//       +--------+   +----------+
//                |   |
//              +---+---+
// recycle bin: | 1 | 3 |
//              +---+---+
//
// Suppose we try to insert "baz" into the cache at this point and the cache is
// full. The cache will first look for entries to evict, starting from where
// the head points to (the second entry). It resets the usage bit of the second
// entry, skips the third and fourth entries since they are not in cache, and
// finally evicts the fifth entry ("foo"). It then looks in the recycle bin for
// an available handle, grabs handle 3, and inserts the key into that handle.
// The following figure shows the resulting layout.
//
//   hash map:
//   +-------+--------+
//   |  key  | handle |
//   +-------+--------+
//   | "baz" |    3   |----------+
//   +-------+--------+          |
//   | "bar" |    2   |--+       |
//   +-------+--------+  |       |
//                       |       |
//                       |       |                                head
//                       |       |                                 |
//   circular list:      |       |                                 |
//   +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//   |(0,0,0)|---|(1,0,0)|---|(1,0,0)|---|(0,1,3)|---|(0,0,0)|---|  ...
//   +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//       |                                               |
//       +--------+   +----------------------------------+
//                |   |
//              +---+---+
// recycle bin: | 1 | 5 |
//              +---+---+
//
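// For illustration only, the handle-allocation step of the insertion above
// (reusing handle 3 from the recycle bin) corresponds to a pattern like the
// following sketch; the real logic lives in Insert()/RecycleHandle() below:
//
//   CacheHandle* h;
//   if (!recycle_.empty()) {
//     h = recycle_.back();   // reuse a recycled handle (handle 3 in the
//     recycle_.pop_back();   // example), keeping the circular list stable
//   } else {
//     list_.emplace_back();  // otherwise grow the circular list by one
//     h = &list_.back();
//   }
//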
// A global mutex guards the circular list, the head, and the recycle bin.
// We additionally require that modifying the hash map needs to hold the mutex.
// As such, modifying the cache (such as Insert() and Erase()) requires holding
// the mutex. Lookup() only accesses the hash map and the flags associated
// with each handle, and doesn't require explicit locking. Release() has to
// acquire the mutex only when it releases the last reference to the entry and
// the entry has been erased from the cache explicitly. A future improvement
// could be to remove the mutex completely.
//
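// Sketched against the locking rule above (illustrative only):
//
//   // Insert()/Erase(): mutate list_, head_, recycle_ and the hash map only
//   // while holding the global mutex.
//   MutexLock l(&mutex_);
//
//   // Lookup(): no mutex; find the handle in the concurrent hash map and
//   // take a reference with a single atomic update of the flags word.
//   HashTable::const_accessor accessor;
//   if (table_.find(accessor, ClockCacheKey(key, hash))) { /* Ref(...) */ }
//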
// Benchmark:
// We run readrandom db_bench on a test DB of size 13GB, with the size of each
// level:
//
//    Level    Files   Size(MB)
//    -------------------------
//      L0        1       0.01
//      L1       18      17.32
//      L2      230     182.94
//      L3     1186    1833.63
//      L4     4602    8140.30
//
// We test with both 32 and 16 read threads, with 2GB cache size (the whole DB
// doesn't fit in) and 64GB cache size (the whole DB can fit in cache), and
// whether to put index and filter blocks in block cache. The benchmark runs
// with RocksDB 4.10. We got the following results:
//
// Threads Cache     Cache               ClockCache               LRUCache
//         Size  Index/Filter Throughput(MB/s)   Hit Throughput(MB/s)    Hit
//      32  2GB       yes              466.7  85.9%            433.7  86.5%
//      32  2GB       no               529.9  72.7%            532.7  73.9%
//      32 64GB       yes              649.9  99.9%            507.9  99.9%
//      32 64GB       no               740.4  99.9%            662.8  99.9%
//      16  2GB       yes              278.4  85.9%            283.4  86.5%
//      16  2GB       no               318.6  72.7%            335.8  73.9%
//      16 64GB       yes              391.9  99.9%            353.3  99.9%
//      16 64GB       no               433.8  99.8%            419.4  99.8%

// Cache entry meta data.
struct CacheHandle {
  Slice key;
  void* value;
  size_t charge;
  Cache::DeleterFn deleter;
  uint32_t hash;

  // Addition to "charge" to get "total charge" under metadata policy.
  uint32_t meta_charge;

  // Flags and counters associated with the cache handle:
  //   lowest bit: in-cache bit
  //   second lowest bit: usage bit
  //   the rest of the bits: reference count
  // The handle is unused when flags equals 0. The thread that decreases the
  // count to 0 is responsible for putting the handle back into recycle_ and
  // cleaning up memory.
  std::atomic<uint32_t> flags;

  CacheHandle() = default;

  CacheHandle(const CacheHandle& a) { *this = a; }

  CacheHandle(const Slice& k, void* v,
              void (*del)(const Slice& key, void* value))
      : key(k), value(v), deleter(del) {}

  CacheHandle& operator=(const CacheHandle& a) {
    // Only copy members needed for deletion.
    key = a.key;
    value = a.value;
    deleter = a.deleter;
    return *this;
  }

  inline static uint32_t CalcMetadataCharge(
      Slice key, CacheMetadataChargePolicy metadata_charge_policy) {
    size_t meta_charge = 0;
    if (metadata_charge_policy == kFullChargeCacheMetadata) {
      meta_charge += sizeof(CacheHandle);
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
      meta_charge +=
          malloc_usable_size(static_cast<void*>(const_cast<char*>(key.data())));
#else
      meta_charge += key.size();
#endif
    }
    assert(meta_charge <= UINT32_MAX);
    return static_cast<uint32_t>(meta_charge);
  }

  inline size_t GetTotalCharge() { return charge + meta_charge; }
};
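
// Illustrative only: how the two charge fields above combine when accounting
// for an insertion (the actual accounting happens in ClockCacheShard::Insert):
//
//   handle->charge = charge;
//   handle->meta_charge =
//       CacheHandle::CalcMetadataCharge(key, metadata_charge_policy);
//   usage_.fetch_add(handle->GetTotalCharge(), std::memory_order_relaxed);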

// Key of hash map. We store the hash value with the key for convenience.
struct ClockCacheKey {
  Slice key;
  uint32_t hash_value;

  ClockCacheKey() = default;

  ClockCacheKey(const Slice& k, uint32_t h) {
    key = k;
    hash_value = h;
  }

  static bool equal(const ClockCacheKey& a, const ClockCacheKey& b) {
    return a.hash_value == b.hash_value && a.key == b.key;
  }

  static size_t hash(const ClockCacheKey& a) {
    return static_cast<size_t>(a.hash_value);
  }
};
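
// Because ClockCacheKey provides static hash() and equal(), it also serves as
// the HashCompare parameter of tbb::concurrent_hash_map (see the HashTable
// alias in ClockCacheShard below). A lookup, sketched for illustration:
//
//   HashTable::const_accessor accessor;
//   if (table_.find(accessor, ClockCacheKey(key, hash))) {
//     CacheHandle* handle = accessor->second;
//   }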

struct CleanupContext {
  // List of values to be deleted, along with the key and deleter.
  autovector<CacheHandle> to_delete_value;

  // List of keys to be deleted.
  autovector<const char*> to_delete_key;
};

// A cache shard which maintains its own CLOCK cache.
class ClockCacheShard final : public CacheShard {
 public:
  // Hash map type.
  using HashTable =
      tbb::concurrent_hash_map<ClockCacheKey, CacheHandle*, ClockCacheKey>;

  ClockCacheShard();
  ~ClockCacheShard() override;

  // Interfaces
  void SetCapacity(size_t capacity) override;
  void SetStrictCapacityLimit(bool strict_capacity_limit) override;
  Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
                void (*deleter)(const Slice& key, void* value),
                Cache::Handle** handle, Cache::Priority priority) override;
  Status Insert(const Slice& key, uint32_t hash, void* value,
                const Cache::CacheItemHelper* helper, size_t charge,
                Cache::Handle** handle, Cache::Priority priority) override {
    return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
  }
  Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
  Cache::Handle* Lookup(const Slice& key, uint32_t hash,
                        const Cache::CacheItemHelper* /*helper*/,
                        const Cache::CreateCallback& /*create_cb*/,
                        Cache::Priority /*priority*/, bool /*wait*/,
                        Statistics* /*stats*/) override {
    return Lookup(key, hash);
  }
  bool Release(Cache::Handle* handle, bool /*useful*/,
               bool force_erase) override {
    return Release(handle, force_erase);
  }
  bool IsReady(Cache::Handle* /*handle*/) override { return true; }
  void Wait(Cache::Handle* /*handle*/) override {}

  // If the entry is in cache, increase its reference count and return true.
  // Return false otherwise.
  //
  // Not necessary to hold mutex_ before being called.
  bool Ref(Cache::Handle* handle) override;
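
  // For illustration only: a lock-free Ref(), as declared above, can be built
  // on the packed flags word roughly as follows (a sketch, not necessarily the
  // exact implementation):
  //
  //   CacheHandle* h = reinterpret_cast<CacheHandle*>(handle);
  //   uint32_t flags = h->flags.load(std::memory_order_relaxed);
  //   while (InCache(flags)) {
  //     if (h->flags.compare_exchange_weak(flags, flags + kOneRef)) {
  //       return true;   // reference taken without holding mutex_
  //     }
  //   }
  //   return false;      // entry has already been erased from the cache
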
  bool Release(Cache::Handle* handle, bool force_erase = false) override;
  void Erase(const Slice& key, uint32_t hash) override;
  bool EraseAndConfirm(const Slice& key, uint32_t hash,
                       CleanupContext* context);
  size_t GetUsage() const override;
  size_t GetPinnedUsage() const override;
  void EraseUnRefEntries() override;
  void ApplyToSomeEntries(
      const std::function<void(const Slice& key, void* value, size_t charge,
                               DeleterFn deleter)>& callback,
      uint32_t average_entries_per_lock, uint32_t* state) override;
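
  // Illustrative only: a caller (for example the sharded cache's
  // ApplyToAllEntries) drives this with a persistent `state`, starting at 0,
  // until the shard reports completion:
  //
  //   uint32_t state = 0;
  //   while (state != UINT32_MAX) {
  //     shard->ApplyToSomeEntries(callback, /*average_entries_per_lock=*/256,
  //                               &state);
  //   }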

 private:
  static const uint32_t kInCacheBit = 1;
  static const uint32_t kUsageBit = 2;
  static const uint32_t kRefsOffset = 2;
  static const uint32_t kOneRef = 1 << kRefsOffset;

  // Helper functions to extract cache handle flags and counters.
  static bool InCache(uint32_t flags) { return flags & kInCacheBit; }
  static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
  static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }

  // Decrease the reference count of the entry. If this decreases the count to
  // 0, recycle the entry. If set_usage is true, also set the usage bit.
  //
  // Returns true if a value is erased.
  //
  // Not necessary to hold mutex_ before being called.
  bool Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);
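
  // Illustrative sketch of the atomic part of Unref() (not necessarily the
  // exact code): the usage bit and the packed reference count can each be
  // updated with one read-modify-write, no mutex needed.
  //
  //   if (set_usage) {
  //     handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
  //   }
  //   uint32_t flags = handle->flags.fetch_sub(kOneRef) - kOneRef;
  //   // If this dropped the last reference of an already-erased entry, this
  //   // thread owns the handle and must recycle it under mutex_.
  //   if (!InCache(flags) && CountRefs(flags) == 0) { /* recycle */ }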

  // Unset the in-cache bit of the entry. Recycle the handle if necessary.
  //
  // Returns true if a value is erased.
  //
  // Has to hold mutex_ before being called.
  bool UnsetInCache(CacheHandle* handle, CleanupContext* context);

  // Put the handle back into the recycle_ list, and put the value associated
  // with it into the to-be-deleted list. It doesn't clean up the key, as it
  // might be reused by another handle.
  //
  // Has to hold mutex_ before being called.
  void RecycleHandle(CacheHandle* handle, CleanupContext* context);

  // Delete keys and values in the to-be-deleted list. Call the method without
  // holding the mutex, as destructors can be expensive.
  void Cleanup(const CleanupContext& context);

  // Examine the handle for eviction. If the handle is in cache, its usage bit
  // is not set, and its reference count is 0, evict it from the cache.
  // Otherwise unset the usage bit.
  //
  // Has to hold mutex_ before being called.
  bool TryEvict(CacheHandle* value, CleanupContext* context);
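
  // Illustrative sketch of the eviction test in TryEvict() (not necessarily
  // the exact code): an entry is evictable only if its flags word is exactly
  // "in cache, no usage, zero refs", which can be claimed atomically:
  //
  //   uint32_t expected = kInCacheBit;
  //   if (handle->flags.compare_exchange_strong(expected, 0)) {
  //     // Won the race: erase from the hash map and recycle the handle.
  //   } else {
  //     handle->flags.fetch_and(~kUsageBit);  // second chance: clear usage
  //   }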

  // Scan through the circular list, evicting entries until we get enough
  // capacity for the new cache entry of the specified size. Return true on
  // success, false otherwise.
  //
  // Has to hold mutex_ before being called.
  bool EvictFromCache(size_t charge, CleanupContext* context);
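
  // Illustrative sketch of the clock sweep in EvictFromCache() (the real code
  // also guards against sweeping forever when nothing is evictable):
  //
  //   while (usage_.load() + charge > capacity_.load()) {
  //     if (list_.empty()) return false;
  //     TryEvict(&list_[head_], context);    // evict or clear the usage bit
  //     head_ = (head_ + 1) % list_.size();  // advance the clock hand
  //   }
  //   return true;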

  CacheHandle* Insert(const Slice& key, uint32_t hash, void* value,
                      size_t change,
                      void (*deleter)(const Slice& key, void* value),
                      bool hold_reference, CleanupContext* context,
                      bool* overwritten);

  // Guards list_, head_, and recycle_. In addition, updating table_ also has
  // to hold the mutex, to avoid the cache being in an inconsistent state.
  mutable port::Mutex mutex_;

  // The circular list of cache handles. Initially the list is empty. Once a
  // handle is needed by an insertion, and no more handles are available in
  // the recycle bin, one more handle is appended to the end.
  //
  // We use std::deque for the circular list because we want to make sure
  // pointers to handles stay valid throughout the life-cycle of the cache
  // (in contrast to std::vector), and to be able to grow the list (in
  // contrast to statically allocated arrays).
  std::deque<CacheHandle> list_;
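
  // Illustrative note on the deque choice above: growing the container must
  // not move existing handles, because the hash map and user handles hold raw
  // CacheHandle* pointers into it. For example:
  //
  //   CacheHandle* p = &list_.back();
  //   list_.emplace_back();  // fine for std::deque: *p stays valid, whereas
  //                          // std::vector could reallocate and invalidate p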

  // Pointer to the next handle in the circular list to be examined for
  // eviction.
  size_t head_;

  // Recycle bin of cache handles.
  autovector<CacheHandle*> recycle_;

  // Maximum cache size.
  std::atomic<size_t> capacity_;

  // Current total size of the cache.
  std::atomic<size_t> usage_;

  // Total un-released cache size.
  std::atomic<size_t> pinned_usage_;

  // Whether to allow insertions when the cache is full.
  std::atomic<bool> strict_capacity_limit_;

  // Hash table (tbb::concurrent_hash_map) for lookup.
  HashTable table_;
};

ClockCacheShard::ClockCacheShard()
    : head_(0), usage_(0), pinned_usage_(0), strict_capacity_limit_(false) {}

2016-08-31 15:56:34 +00:00
|
|
|
ClockCacheShard::~ClockCacheShard() {
|
|
|
|
for (auto& handle : list_) {
|
|
|
|
uint32_t flags = handle.flags.load(std::memory_order_relaxed);
|
|
|
|
if (InCache(flags) || CountRefs(flags) > 0) {
|
2018-04-05 18:49:42 +00:00
|
|
|
if (handle.deleter != nullptr) {
|
|
|
|
(*handle.deleter)(handle.key, handle.value);
|
|
|
|
}
|
2016-08-31 15:56:34 +00:00
|
|
|
delete[] handle.key.data();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-19 19:28:19 +00:00
|
|
|
size_t ClockCacheShard::GetUsage() const {
|
|
|
|
return usage_.load(std::memory_order_relaxed);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t ClockCacheShard::GetPinnedUsage() const {
|
|
|
|
return pinned_usage_.load(std::memory_order_relaxed);
|
|
|
|
}
|
|
|
|
|
void ClockCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  assert(average_entries_per_lock > 0);
  MutexLock lock(&mutex_);

  // Figure out the range to iterate, update `state`
  size_t list_size = list_.size();
  size_t start_idx = *state;
  size_t end_idx = start_idx + average_entries_per_lock;
  if (start_idx > list_size) {
    // Shouldn't reach here, but recoverable
    assert(false);
    // Mark finished with all
    *state = UINT32_MAX;
    return;
  }
  if (end_idx >= list_size || end_idx >= UINT32_MAX) {
    // This also includes the hypothetical case of >4 billion
    // cache handles.
    end_idx = list_size;
    // Mark finished with all
    *state = UINT32_MAX;
  } else {
    *state = static_cast<uint32_t>(end_idx);
  }

  // Do the iteration
  auto cur = list_.begin() + start_idx;
  auto end = list_.begin() + end_idx;
  for (; cur != end; ++cur) {
    const CacheHandle& handle = *cur;
    // Use relaxed semantics instead of acquire semantics since we are
    // holding the mutex
    uint32_t flags = handle.flags.load(std::memory_order_relaxed);
    if (InCache(flags)) {
( 33000, 50000 ] 1736882 2.171% 99.316%
( 50000, 75000 ] 446851 0.559% 99.875%
( 75000, 110000 ] 68251 0.085% 99.960%
( 110000, 170000 ] 18592 0.023% 99.983%
( 170000, 250000 ] 7200 0.009% 99.992%
( 250000, 380000 ] 3334 0.004% 99.997%
( 380000, 570000 ] 1393 0.002% 99.998%
( 570000, 860000 ] 700 0.001% 99.999%
( 860000, 1200000 ] 293 0.000% 100.000%
( 1200000, 1900000 ] 196 0.000% 100.000%
( 1900000, 2900000 ] 69 0.000% 100.000%
( 2900000, 4300000 ] 32 0.000% 100.000%
( 4300000, 6500000 ] 10 0.000% 100.000%
```
New, gather_stats=true, 1 second delay between scans. Scans take about
1 second here so it's spending about 50% time scanning. Still the effect on
ops/sec and latency seems to be in the noise. Median thread ops/sec of 5 runs:
```
Complete in 91.890 s; Rough parallel ops/sec = 870608
Thread ops/sec = 54551
Operation latency (ns):
Count: 80000000 Average: 11311.2629 StdDev: 45.28
Min: 0 Median: 7686.5458 Max: 10018340
Percentiles: P50: 7686.55 P75: 14481.95 P99: 47232.60 P99.9: 79230.18 P99.99: 232998.86
------------------------------------------------------
[ 0, 1 ] 71 0.000% 0.000%
( 2900, 4400 ] 291 0.000% 0.000%
( 4400, 6600 ] 34492060 43.115% 43.116% #########
( 6600, 9900 ] 16727328 20.909% 64.025% ####
( 9900, 14000 ] 7845828 9.807% 73.832% ##
( 14000, 22000 ] 15510654 19.388% 93.220% ####
( 22000, 33000 ] 3216533 4.021% 97.241% #
( 33000, 50000 ] 1680859 2.101% 99.342%
( 50000, 75000 ] 439059 0.549% 99.891%
( 75000, 110000 ] 60540 0.076% 99.967%
( 110000, 170000 ] 14649 0.018% 99.985%
( 170000, 250000 ] 5242 0.007% 99.991%
( 250000, 380000 ] 3260 0.004% 99.995%
( 380000, 570000 ] 1599 0.002% 99.997%
( 570000, 860000 ] 1043 0.001% 99.999%
( 860000, 1200000 ] 471 0.001% 99.999%
( 1200000, 1900000 ] 275 0.000% 100.000%
( 1900000, 2900000 ] 143 0.000% 100.000%
( 2900000, 4300000 ] 60 0.000% 100.000%
( 4300000, 6500000 ] 27 0.000% 100.000%
( 6500000, 9800000 ] 7 0.000% 100.000%
( 9800000, 14000000 ] 1 0.000% 100.000%
Gather stats latency (us):
Count: 46 Average: 980387.5870 StdDev: 60911.18
Min: 879155 Median: 1033777.7778 Max: 1261431
Percentiles: P50: 1033777.78 P75: 1120666.67 P99: 1261431.00 P99.9: 1261431.00 P99.99: 1261431.00
------------------------------------------------------
( 860000, 1200000 ] 45 97.826% 97.826% ####################
( 1200000, 1900000 ] 1 2.174% 100.000%
Most recent cache entry stats:
Number of entries: 1295133
Total charge: 9.88 GB
Average key size: 23.4982
Average charge: 8.00 KB
Unique deleters: 3
```
Reviewed By: mrambacher
Differential Revision: D28295742
Pulled By: pdillinger
fbshipit-source-id: bbc4a552f91ba0fe10e5cc025c42cef5a81f2b95
2021-05-11 23:16:11 +00:00
|
|
|
callback(handle.key, handle.value, handle.charge, handle.deleter);
|
2016-08-19 19:28:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
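
// Note: the per-entry callback above is invoked while the shard mutex is
// held, so it should stay cheap. Callers of Cache::ApplyToAllEntries can
// bound how many entries are visited per lock acquire/release cycle
// (roughly 256 entries per lock has been a reasonable default).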

void ClockCacheShard::RecycleHandle(CacheHandle* handle,
                                    CleanupContext* context) {
  mutex_.AssertHeld();
  assert(!InCache(handle->flags) && CountRefs(handle->flags) == 0);
  context->to_delete_key.push_back(handle->key.data());
  context->to_delete_value.emplace_back(*handle);
  size_t total_charge = handle->GetTotalCharge();
  // clearing `handle` fields would go here but not strictly required
  recycle_.push_back(handle);
  usage_.fetch_sub(total_charge, std::memory_order_relaxed);
}

void ClockCacheShard::Cleanup(const CleanupContext& context) {
  for (const CacheHandle& handle : context.to_delete_value) {
    if (handle.deleter) {
      (*handle.deleter)(handle.key, handle.value);
    }
  }
  for (const char* key : context.to_delete_key) {
    delete[] key;
  }
}
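
// Note: mutating operations in this file follow a two-phase pattern: victims
// are collected into a CleanupContext while the shard mutex is held, and the
// user-supplied deleters plus key deallocations run afterwards in Cleanup(),
// outside the mutex, keeping the critical section short.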

bool ClockCacheShard::Ref(Cache::Handle* h) {
  auto handle = reinterpret_cast<CacheHandle*>(h);
  // CAS loop to increase reference count.
  uint32_t flags = handle->flags.load(std::memory_order_relaxed);
  while (InCache(flags)) {
    // Use acquire semantics on success, as further operations on the cache
    // entry have to be ordered after the reference count is increased.
    if (handle->flags.compare_exchange_weak(flags, flags + kOneRef,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
      if (CountRefs(flags) == 0) {
        // No reference count before the operation.
        size_t total_charge = handle->GetTotalCharge();
        pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
      }
      return true;
    }
  }
  return false;
}
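
// Note: on compare_exchange_weak failure, `flags` is reloaded with the value
// currently observed in the handle, so the loop above re-checks InCache()
// before retrying and gives up (returns false) once the entry has been
// removed from the cache.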

bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
                            CleanupContext* context) {
  if (set_usage) {
    handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
  }
  // If the handle reaches state refs=0 and InCache=true after this
  // atomic operation, then we cannot access `handle` afterward, because
  // it could be evicted before we access the `handle`.
  size_t total_charge = handle->GetTotalCharge();

  // Use acquire-release semantics as previous operations on the cache entry
  // have to be ordered before the reference count is decreased, and potential
  // cleanup of the entry has to be ordered after.
  uint32_t flags = handle->flags.fetch_sub(kOneRef, std::memory_order_acq_rel);
  assert(CountRefs(flags) > 0);
  if (CountRefs(flags) == 1) {
    // This is the last reference.
    pinned_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
    // Cleanup if it is the last reference.
    if (!InCache(flags)) {
      MutexLock l(&mutex_);
      RecycleHandle(handle, context);
    }
  }
  return context->to_delete_value.size();
}
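
// Note: the boolean result reflects whether the CleanupContext now holds
// entries to delete; with a freshly constructed context (as in Release()
// below) that means the handle itself was recycled, so a forced erase can
// be skipped.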

bool ClockCacheShard::UnsetInCache(CacheHandle* handle,
                                   CleanupContext* context) {
  mutex_.AssertHeld();
  // Use acquire-release semantics as previous operations on the cache entry
  // have to be ordered before the InCache bit is cleared, and potential
  // cleanup of the entry has to be ordered after.
  uint32_t flags =
      handle->flags.fetch_and(~kInCacheBit, std::memory_order_acq_rel);
  // Cleanup if it is the last reference.
  if (InCache(flags) && CountRefs(flags) == 0) {
    RecycleHandle(handle, context);
  }
  return context->to_delete_value.size();
}

bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
  mutex_.AssertHeld();
  uint32_t flags = kInCacheBit;
  if (handle->flags.compare_exchange_strong(flags, 0, std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
    bool erased __attribute__((__unused__)) =
        table_.erase(ClockCacheKey(handle->key, handle->hash));
    assert(erased);
    RecycleHandle(handle, context);
    return true;
  }
  handle->flags.fetch_and(~kUsageBit, std::memory_order_relaxed);
  return false;
}
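
// Note: TryEvict() gives each entry a "second chance": only a handle whose
// flags are exactly kInCacheBit (in cache, zero references, usage bit clear)
// is evicted; otherwise its usage bit is cleared so the entry becomes
// evictable on a later pass unless it is accessed again. EvictFromCache()
// below sweeps the circular list starting at head_ and gives up after about
// two full passes if it still cannot free enough space.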

bool ClockCacheShard::EvictFromCache(size_t charge, CleanupContext* context) {
  size_t usage = usage_.load(std::memory_order_relaxed);
  size_t capacity = capacity_.load(std::memory_order_relaxed);
  if (usage == 0) {
    return charge <= capacity;
  }
  size_t new_head = head_;
  bool second_iteration = false;
  while (usage + charge > capacity) {
    assert(new_head < list_.size());
    if (TryEvict(&list_[new_head], context)) {
      usage = usage_.load(std::memory_order_relaxed);
    }
    new_head = (new_head + 1 >= list_.size()) ? 0 : new_head + 1;
    if (new_head == head_) {
      if (second_iteration) {
        return false;
      } else {
        second_iteration = true;
      }
    }
  }
  head_ = new_head;
  return true;
}

void ClockCacheShard::SetCapacity(size_t capacity) {
  CleanupContext context;
  {
    MutexLock l(&mutex_);
    capacity_.store(capacity, std::memory_order_relaxed);
    EvictFromCache(0, &context);
  }
  Cleanup(context);
}

void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  strict_capacity_limit_.store(strict_capacity_limit,
                               std::memory_order_relaxed);
}

CacheHandle* ClockCacheShard::Insert(
    const Slice& key, uint32_t hash, void* value, size_t charge,
    void (*deleter)(const Slice& key, void* value), bool hold_reference,
    CleanupContext* context, bool* overwritten) {
  assert(overwritten != nullptr && *overwritten == false);
  uint32_t meta_charge =
      CacheHandle::CalcMetadataCharge(key, metadata_charge_policy_);
  size_t total_charge = charge + meta_charge;
  MutexLock l(&mutex_);
  bool success = EvictFromCache(total_charge, context);
  bool strict = strict_capacity_limit_.load(std::memory_order_relaxed);
  if (!success && (strict || !hold_reference)) {
    context->to_delete_key.push_back(key.data());
    if (!hold_reference) {
      context->to_delete_value.emplace_back(key, value, deleter);
    }
    return nullptr;
  }
  // Grab available handle from recycle bin. If recycle bin is empty, create
  // and append new handle to end of circular list.
  CacheHandle* handle = nullptr;
  if (!recycle_.empty()) {
    handle = recycle_.back();
    recycle_.pop_back();
  } else {
    list_.emplace_back();
    handle = &list_.back();
  }
  // Fill handle.
  handle->key = key;
  handle->hash = hash;
  handle->value = value;
  handle->charge = charge;
  handle->meta_charge = meta_charge;
  handle->deleter = deleter;
  uint32_t flags = hold_reference ? kInCacheBit + kOneRef : kInCacheBit;

  // TODO investigate+fix suspected race condition:
  // [thread 1] Lookup starts, up to Ref()
  // [thread 2] Erase/evict the entry just looked up
  // [thread 1] Ref() the handle, even though it's in the recycle bin
  // [thread 2] Insert with recycling that handle
  // Here we obliterate the other thread's Ref
  // Possible fix: never blindly overwrite the flags, but only make
  // relative updates (fetch_add, etc).
  handle->flags.store(flags, std::memory_order_relaxed);
  HashTable::accessor accessor;
  if (table_.find(accessor, ClockCacheKey(key, hash))) {
    *overwritten = true;
    CacheHandle* existing_handle = accessor->second;
    table_.erase(accessor);
    UnsetInCache(existing_handle, context);
  }
  table_.insert(HashTable::value_type(ClockCacheKey(key, hash), handle));
  if (hold_reference) {
    pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
  }
  usage_.fetch_add(total_charge, std::memory_order_relaxed);
  return handle;
}

Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                               size_t charge,
                               void (*deleter)(const Slice& key, void* value),
                               Cache::Handle** out_handle,
                               Cache::Priority /*priority*/) {
  CleanupContext context;
  HashTable::accessor accessor;
  char* key_data = new char[key.size()];
  memcpy(key_data, key.data(), key.size());
  Slice key_copy(key_data, key.size());
  bool overwritten = false;
  CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter,
                               out_handle != nullptr, &context, &overwritten);
  Status s;
  if (out_handle != nullptr) {
    if (handle == nullptr) {
      s = Status::Incomplete("Insert failed due to CLOCK cache being full.");
    } else {
      *out_handle = reinterpret_cast<Cache::Handle*>(handle);
    }
  }
  if (overwritten) {
    assert(s.ok());
    s = Status::OkOverwritten();
  }
  Cleanup(context);
  return s;
}
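
// Note: Insert() copies the key into heap memory owned by the cache. If the
// insertion fails, the copy is queued in to_delete_key and released by
// Cleanup(); otherwise it is released when the handle is eventually recycled.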

Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
  HashTable::const_accessor accessor;
  if (!table_.find(accessor, ClockCacheKey(key, hash))) {
    return nullptr;
  }
  CacheHandle* handle = accessor->second;
  accessor.release();
  // Ref() could fail if another thread sneaks in and evicts/erases the cache
  // entry before we are able to hold a reference.
  if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
    return nullptr;
  }
  // Double check the key, since the handle may now represent another key if
  // other threads sneak in, evict/erase the entry and re-use the handle for
  // another cache entry.
  if (hash != handle->hash || key != handle->key) {
    CleanupContext context;
    Unref(handle, false, &context);
    // It is possible that Unref() deleted the entry, so we need to clean up.
    Cleanup(context);
    return nullptr;
  }
  return reinterpret_cast<Cache::Handle*>(handle);
}

bool ClockCacheShard::Release(Cache::Handle* h, bool force_erase) {
  CleanupContext context;
  CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
  bool erased = Unref(handle, true, &context);
  if (force_erase && !erased) {
    erased = EraseAndConfirm(handle->key, handle->hash, &context);
  }
  Cleanup(context);
  return erased;
}

void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
  CleanupContext context;
  EraseAndConfirm(key, hash, &context);
  Cleanup(context);
}

bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
                                      CleanupContext* context) {
  MutexLock l(&mutex_);
  HashTable::accessor accessor;
  bool erased = false;
  if (table_.find(accessor, ClockCacheKey(key, hash))) {
    CacheHandle* handle = accessor->second;
    table_.erase(accessor);
    erased = UnsetInCache(handle, context);
  }
  return erased;
}

void ClockCacheShard::EraseUnRefEntries() {
  CleanupContext context;
  {
    MutexLock l(&mutex_);
    table_.clear();
    for (auto& handle : list_) {
      UnsetInCache(&handle, &context);
    }
  }
  Cleanup(context);
}

class ClockCache final : public ShardedCache {
 public:
  ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
             CacheMetadataChargePolicy metadata_charge_policy)
      : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
    int num_shards = 1 << num_shard_bits;
    shards_ = new ClockCacheShard[num_shards];
    for (int i = 0; i < num_shards; i++) {
      shards_[i].set_metadata_charge_policy(metadata_charge_policy);
    }
    SetCapacity(capacity);
    SetStrictCapacityLimit(strict_capacity_limit);
  }

  ~ClockCache() override { delete[] shards_; }

  const char* Name() const override { return "ClockCache"; }

  CacheShard* GetShard(uint32_t shard) override {
    return reinterpret_cast<CacheShard*>(&shards_[shard]);
  }

  const CacheShard* GetShard(uint32_t shard) const override {
    return reinterpret_cast<CacheShard*>(&shards_[shard]);
  }

  void* Value(Handle* handle) override {
    return reinterpret_cast<const CacheHandle*>(handle)->value;
  }

  size_t GetCharge(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->charge;
  }

  uint32_t GetHash(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->hash;
  }

  DeleterFn GetDeleter(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->deleter;
  }

  void DisownData() override {
    // Leak data only if that won't generate an ASAN/valgrind warning
    if (!kMustFreeHeapAllocations) {
      shards_ = nullptr;
    }
  }

  void WaitAll(std::vector<Handle*>& /*handles*/) override {}

 private:
  ClockCacheShard* shards_;
};

}  // end anonymous namespace
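
// Example usage (illustrative sketch; the factory below is declared in
// include/rocksdb/cache.h). NewClockCache() may return nullptr when clock
// cache support is not compiled in (see the SUPPORT_CLOCK_CACHE guard), so
// callers typically fall back to NewLRUCache():
//
//   std::shared_ptr<Cache> cache =
//       NewClockCache(1 << 30 /*capacity*/, 6 /*num_shard_bits*/,
//                     false /*strict_capacity_limit*/,
//                     kDontChargeCacheMetadata);
//   if (!cache) {
//     cache = NewLRUCache(1 << 30);
//   }
//   BlockBasedTableOptions table_options;
//   table_options.block_cache = cache;
//   Options options;
//   options.table_factory.reset(NewBlockBasedTableFactory(table_options));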

std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<ClockCache>(
      capacity, num_shard_bits, strict_capacity_limit, metadata_charge_policy);
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // SUPPORT_CLOCK_CACHE