// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/cache_reservation_manager.h"

#include <cstddef>
#include <cstring>
#include <memory>

#include "cache/cache_entry_roles.h"
#include "port/stack_trace.h"
#include "rocksdb/cache.h"
#include "rocksdb/slice.h"
#include "test_util/testharness.h"
#include "util/coding.h"

namespace ROCKSDB_NAMESPACE {
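// Test fixture: a single-shard LRU cache sized in multiples of the dummy
// entry size, plus a CacheReservationManager (under CacheEntryRole::kMisc)
// charging its reservations against that cache.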
class CacheReservationManagerTest : public ::testing::Test {
 protected:
  static constexpr std::size_t kSizeDummyEntry =
      CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
  static constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
  static constexpr int kNumShardBits = 0;  // 2^0 shard
  static constexpr std::size_t kMetaDataChargeOverhead = 10000;

  std::shared_ptr<Cache> cache = NewLRUCache(kCacheCapacity, kNumShardBits);
  std::shared_ptr<CacheReservationManager> test_cache_rev_mng;

  CacheReservationManagerTest() {
    test_cache_rev_mng =
        std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
            cache);
  }
};
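
// Verifies that the dummy entry inserted by UpdateCacheReservation() can be
// looked up under the cache key the manager is expected to have generated.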
TEST_F(CacheReservationManagerTest, GenerateCacheKey) {
  std::size_t new_mem_used = 1 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  ASSERT_EQ(s, Status::OK());
  ASSERT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            1 * kSizeDummyEntry + kMetaDataChargeOverhead);

  // Next unique Cache key
  CacheKey ckey = CacheKey::CreateUniqueForCacheLifetime(cache.get());
  // Get to the underlying values
  uint64_t* ckey_data = reinterpret_cast<uint64_t*>(&ckey);
  // Back it up to the one used by CRM (using CacheKey implementation details)
  ckey_data[1]--;

  // Specific key (subject to implementation details)
  EXPECT_EQ(ckey_data[0], 0);
  EXPECT_EQ(ckey_data[1], 2);

  Cache::Handle* handle = cache->Lookup(ckey.AsSlice());
  EXPECT_NE(handle, nullptr)
      << "Failed to generate the cache key for the dummy entry correctly";
  // Clean up the returned handle from Lookup() to prevent memory leak
  cache->Release(handle);
}
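
// Verifies that updating the reservation with an unchanged memory amount
// leaves both the bookkeeping and the pinned dummy entries untouched.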
TEST_F(CacheReservationManagerTest, KeepCacheReservationTheSame) {
  std::size_t new_mem_used = 1 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry);
  ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
  std::size_t initial_pinned_usage = cache->GetPinnedUsage();
  ASSERT_GE(initial_pinned_usage, 1 * kSizeDummyEntry);
  ASSERT_LT(initial_pinned_usage,
            1 * kSizeDummyEntry + kMetaDataChargeOverhead);

  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to keep cache reservation the same when new_mem_used equals "
         "the current cache reservation";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry)
      << "Failed to bookkeep correctly when new_mem_used equals the current "
         "cache reservation";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly when new_mem_used "
         "equals the current cache reservation";
  EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
      << "Failed to keep underlying dummy entries the same when new_mem_used "
         "equals the current cache reservation";
}
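
// Verifies that increasing the reservation by an exact multiple of the dummy
// entry size inserts the corresponding number of dummy entries.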
TEST_F(CacheReservationManagerTest,
       IncreaseCacheReservationByMultiplesOfDummyEntrySize) {
  std::size_t new_mem_used = 2 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to increase cache reservation correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            2 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation increase correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry)
      << "Failed to increase underlying dummy entries in cache correctly";
  EXPECT_LT(cache->GetPinnedUsage(),
            2 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to increase underlying dummy entries in cache correctly";
}
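
// Verifies that a requested amount between multiples of the dummy entry size
// is rounded up to the next whole dummy entry.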
TEST_F(CacheReservationManagerTest,
       IncreaseCacheReservationNotByMultiplesOfDummyEntrySize) {
  std::size_t new_mem_used = 2 * kSizeDummyEntry + kSizeDummyEntry / 2;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to increase cache reservation correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            3 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation increase correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 3 * kSizeDummyEntry)
      << "Failed to increase underlying dummy entries in cache correctly";
  EXPECT_LT(cache->GetPinnedUsage(),
            3 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to increase underlying dummy entries in cache correctly";
}
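
// Verifies behavior when the cache is too full for the requested reservation:
// the failure is reported, only successful dummy entry insertions are
// bookkept, and the reservation can still be adjusted afterwards.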
TEST(CacheReservationManagerIncreaseReservationOnFullCacheTest,
     IncreaseCacheReservationOnFullCache) {
  constexpr std::size_t kSizeDummyEntry =
      CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
  constexpr std::size_t kSmallCacheCapacity = 4 * kSizeDummyEntry;
  constexpr std::size_t kBigCacheCapacity = 4096 * kSizeDummyEntry;
  constexpr std::size_t kMetaDataChargeOverhead = 10000;

  LRUCacheOptions lo;
  lo.capacity = kSmallCacheCapacity;
  lo.num_shard_bits = 0;  // 2^0 shard
  lo.strict_capacity_limit = true;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);
  std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
      std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
          cache);

  std::size_t new_mem_used = kSmallCacheCapacity + 1;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::MemoryLimit())
      << "Failed to return status to indicate failure of dummy entry "
         "insertion during cache reservation on full cache";
  EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry)
      << "Failed to bookkeep correctly before cache reservation failure "
         "happens due to full cache";
  EXPECT_LE(test_cache_rev_mng->GetTotalReservedCacheSize(),
            kSmallCacheCapacity)
      << "Failed to bookkeep correctly (i.e., bookkeep only successful dummy "
         "entry insertions) when encountering cache reservation failure due "
         "to full cache";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
      << "Failed to insert underlying dummy entries correctly when "
         "encountering cache reservation failure due to full cache";
  EXPECT_LE(cache->GetPinnedUsage(), kSmallCacheCapacity)
      << "Failed to insert underlying dummy entries correctly when "
         "encountering cache reservation failure due to full cache";

  new_mem_used = kSmallCacheCapacity / 2;  // 2 dummy entries
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to decrease cache reservation after encountering cache "
         "reservation failure due to full cache";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            2 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation decrease correctly after "
         "encountering cache reservation failure due to full cache";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry)
      << "Failed to release underlying dummy entries correctly on cache "
         "reservation decrease after encountering cache reservation failure "
         "due to full cache";
  EXPECT_LT(cache->GetPinnedUsage(),
            2 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to release underlying dummy entries correctly on cache "
         "reservation decrease after encountering cache reservation failure "
         "due to full cache";

  // Create cache full again for subsequent tests
  new_mem_used = kSmallCacheCapacity + 1;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::MemoryLimit())
      << "Failed to return status to indicate failure of dummy entry "
         "insertion during cache reservation on full cache";
  EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry)
      << "Failed to bookkeep correctly before cache reservation failure "
         "happens due to full cache";
  EXPECT_LE(test_cache_rev_mng->GetTotalReservedCacheSize(),
            kSmallCacheCapacity)
      << "Failed to bookkeep correctly (i.e., bookkeep only successful dummy "
         "entry insertions) when encountering cache reservation failure due "
         "to full cache";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
      << "Failed to insert underlying dummy entries correctly when "
         "encountering cache reservation failure due to full cache";
  EXPECT_LE(cache->GetPinnedUsage(), kSmallCacheCapacity)
      << "Failed to insert underlying dummy entries correctly when "
         "encountering cache reservation failure due to full cache";

  // Increase cache capacity so the previously failed insertion can fully
  // succeed
  cache->SetCapacity(kBigCacheCapacity);
  new_mem_used = kSmallCacheCapacity + 1;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to increase cache reservation after increasing cache "
         "capacity and mitigating cache full error";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            5 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation increase correctly after "
         "increasing cache capacity and mitigating cache full error";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 5 * kSizeDummyEntry)
      << "Failed to insert underlying dummy entries correctly after "
         "increasing cache capacity and mitigating cache full error";
  EXPECT_LT(cache->GetPinnedUsage(),
            5 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to insert underlying dummy entries correctly after "
         "increasing cache capacity and mitigating cache full error";
}
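
// Verifies that decreasing the reservation by an exact multiple of the dummy
// entry size releases the corresponding dummy entries.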
TEST_F(CacheReservationManagerTest,
       DecreaseCacheReservationByMultiplesOfDummyEntrySize) {
  std::size_t new_mem_used = 2 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            2 * kSizeDummyEntry);
  ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
  ASSERT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            2 * kSizeDummyEntry + kMetaDataChargeOverhead);

  new_mem_used = 1 * kSizeDummyEntry;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to decrease cache reservation correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation decrease correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
      << "Failed to decrease underlying dummy entries in cache correctly";
  EXPECT_LT(cache->GetPinnedUsage(),
            1 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to decrease underlying dummy entries in cache correctly";
}
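
// Verifies that a decrease to an amount between multiples of the dummy entry
// size still keeps one whole dummy entry reserved.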
TEST_F(CacheReservationManagerTest,
       DecreaseCacheReservationNotByMultiplesOfDummyEntrySize) {
  std::size_t new_mem_used = 2 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            2 * kSizeDummyEntry);
  ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
  ASSERT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            2 * kSizeDummyEntry + kMetaDataChargeOverhead);

  new_mem_used = kSizeDummyEntry / 2;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to decrease cache reservation correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            1 * kSizeDummyEntry)
      << "Failed to bookkeep cache reservation decrease correctly";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
      << "Failed to decrease underlying dummy entries in cache correctly";
  EXPECT_LT(cache->GetPinnedUsage(),
            1 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to decrease underlying dummy entries in cache correctly";
}
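
// Verifies delayed-decrease mode: dummy entries are only released once the
// requested memory drops below 3/4 of the current reservation.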
TEST(CacheReservationManagerWithDelayedDecreaseTest,
     DecreaseCacheReservationWithDelayedDecrease) {
  constexpr std::size_t kSizeDummyEntry =
      CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
  constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
  constexpr std::size_t kMetaDataChargeOverhead = 10000;

  LRUCacheOptions lo;
  lo.capacity = kCacheCapacity;
  lo.num_shard_bits = 0;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);
  std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
      std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
          cache, true /* delayed_decrease */);

  std::size_t new_mem_used = 8 * kSizeDummyEntry;
  Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            8 * kSizeDummyEntry);
  ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
  std::size_t initial_pinned_usage = cache->GetPinnedUsage();
  ASSERT_GE(initial_pinned_usage, 8 * kSizeDummyEntry);
  ASSERT_LT(initial_pinned_usage,
            8 * kSizeDummyEntry + kMetaDataChargeOverhead);

  new_mem_used = 6 * kSizeDummyEntry;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK()) << "Failed to delay decreasing cache reservation";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            8 * kSizeDummyEntry)
      << "Failed to bookkeep correctly when delaying cache reservation "
         "decrease";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
      << "Failed to delay decreasing underlying dummy entries in cache";

  new_mem_used = 7 * kSizeDummyEntry;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK()) << "Failed to delay decreasing cache reservation";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            8 * kSizeDummyEntry)
      << "Failed to bookkeep correctly when delaying cache reservation "
         "decrease";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
      << "Failed to delay decreasing underlying dummy entries in cache";

  new_mem_used = 6 * kSizeDummyEntry - 1;
  s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
  EXPECT_EQ(s, Status::OK())
      << "Failed to decrease cache reservation correctly when new_mem_used < "
         "GetTotalReservedCacheSize() * 3 / 4 on delayed decrease mode";
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
            6 * kSizeDummyEntry)
      << "Failed to bookkeep correctly when new_mem_used < "
         "GetTotalReservedCacheSize() * 3 / 4 on delayed decrease mode";
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
      << "Failed to bookkeep the used memory correctly";
  EXPECT_GE(cache->GetPinnedUsage(), 6 * kSizeDummyEntry)
      << "Failed to decrease underlying dummy entries in cache when "
         "new_mem_used < GetTotalReservedCacheSize() * 3 / 4 on delayed "
         "decrease mode";
  EXPECT_LT(cache->GetPinnedUsage(),
            6 * kSizeDummyEntry + kMetaDataChargeOverhead)
      << "Failed to decrease underlying dummy entries in cache when "
         "new_mem_used < GetTotalReservedCacheSize() * 3 / 4 on delayed "
         "decrease mode";
}
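
// Verifies that remaining dummy entries are released when the
// CacheReservationManager is destroyed.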
TEST(CacheReservationManagerDestructorTest,
     ReleaseRemainingDummyEntriesOnDestruction) {
  constexpr std::size_t kSizeDummyEntry =
      CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
  constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
  constexpr std::size_t kMetaDataChargeOverhead = 10000;

  LRUCacheOptions lo;
  lo.capacity = kCacheCapacity;
  lo.num_shard_bits = 0;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);
  {
    std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
        std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
            cache);
    std::size_t new_mem_used = 1 * kSizeDummyEntry;
    Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
    ASSERT_EQ(s, Status::OK());
    ASSERT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry);
    ASSERT_LT(cache->GetPinnedUsage(),
              1 * kSizeDummyEntry + kMetaDataChargeOverhead);
  }
  EXPECT_EQ(cache->GetPinnedUsage(), 0 * kSizeDummyEntry)
      << "Failed to release remaining underlying dummy entries in cache in "
         "CacheReservationManager's destructor";
}
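
// Verifies CacheReservationHandle: consecutive MakeCacheReservation() calls,
// release of the reserved cache when a handle is destroyed, and that
// outstanding handles keep the manager's reservations alive.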
TEST(CacheReservationHandleTest, HandleTest) {
  constexpr std::size_t kOneGigabyte = 1024 * 1024 * 1024;
  constexpr std::size_t kSizeDummyEntry = 256 * 1024;
  constexpr std::size_t kMetaDataChargeOverhead = 10000;

  LRUCacheOptions lo;
  lo.capacity = kOneGigabyte;
  lo.num_shard_bits = 0;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);

  std::shared_ptr<CacheReservationManager> test_cache_rev_mng(
      std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
          cache));

  std::size_t mem_used = 0;
  const std::size_t incremental_mem_used_handle_1 = 1 * kSizeDummyEntry;
  const std::size_t incremental_mem_used_handle_2 = 2 * kSizeDummyEntry;
  std::unique_ptr<CacheReservationManager::CacheReservationHandle> handle_1,
      handle_2;

  // To test that consecutive CacheReservationManager::MakeCacheReservation()
  // calls work correctly in terms of returning the handle as well as updating
  // the cache reservation and the latest total memory used
  Status s = test_cache_rev_mng->MakeCacheReservation(
      incremental_mem_used_handle_1, &handle_1);
  mem_used = mem_used + incremental_mem_used_handle_1;
  ASSERT_EQ(s, Status::OK());
  EXPECT_TRUE(handle_1 != nullptr);
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
  EXPECT_GE(cache->GetPinnedUsage(), mem_used);
  EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);

  s = test_cache_rev_mng->MakeCacheReservation(incremental_mem_used_handle_2,
                                               &handle_2);
  mem_used = mem_used + incremental_mem_used_handle_2;
  ASSERT_EQ(s, Status::OK());
  EXPECT_TRUE(handle_2 != nullptr);
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
  EXPECT_GE(cache->GetPinnedUsage(), mem_used);
  EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);

  // To test that
  // CacheReservationManager::CacheReservationHandle::~CacheReservationHandle()
  // works correctly in releasing the cache reserved for the handle
  handle_1.reset();
  EXPECT_TRUE(handle_1 == nullptr);
  mem_used = mem_used - incremental_mem_used_handle_1;
  EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
  EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
  EXPECT_GE(cache->GetPinnedUsage(), mem_used);
  EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);

  // To test that the actual CacheReservationManager object won't be
  // deallocated as long as there remain handles pointing to it.
  // We strongly recommend deallocating the CacheReservationManager object only
  // after all its handles are deallocated to keep things easy to reason about
  test_cache_rev_mng.reset();
  EXPECT_GE(cache->GetPinnedUsage(), mem_used);
  EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);

  handle_2.reset();
  // The CacheReservationManager object is now deallocated since all the
  // handles and its original pointer are gone
  mem_used = mem_used - incremental_mem_used_handle_2;
  EXPECT_EQ(mem_used, 0);
  EXPECT_EQ(cache->GetPinnedUsage(), mem_used);
}
}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}