// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once
Split cache to minimize internal fragmentation (#10287)
Summary:
### **Summary:**
To minimize the internal fragmentation caused by the variable sizes of compressed blocks, the original block is split according to the jemalloc bin sizes in `Insert()` and the pieces are merged back in `Lookup()`. Based on the results of the tests below, this PR does mitigate the overall internal fragmentation.
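For illustration, here is a minimal sketch of the split/merge idea (not the actual `SplitValueIntoChunks`/`MergeChunksIntoValue` code; the bin table and the chunking rule are simplified assumptions):

```cpp
#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// Illustrative jemalloc-style bin sizes; the real cache keeps its own table.
static const size_t kBins[] = {128, 256, 512, 1024, 2048, 4096, 8192, 16384};

// Insert-side split: cut the compressed payload into pieces whose sizes match
// allocator bins, so each allocation wastes little internal space.
std::vector<std::string> SplitIntoBinSizedChunks(const std::string& value) {
  std::vector<std::string> chunks;
  size_t pos = 0;
  while (pos < value.size()) {
    const size_t remaining = value.size() - pos;
    // Pick the largest bin that still fits into the remaining bytes; the tail
    // piece simply takes whatever is left.
    size_t chunk = kBins[0];
    for (size_t bin : kBins) {
      if (bin <= remaining) chunk = bin;
    }
    chunk = std::min(chunk, remaining);
    chunks.emplace_back(value.data() + pos, chunk);
    pos += chunk;
  }
  return chunks;
}

// Lookup-side merge: concatenate the chunks back into the original payload.
std::string MergeChunks(const std::vector<std::string>& chunks) {
  std::string merged;
  for (const auto& c : chunks) merged += c;
  return merged;
}
```

The real implementation additionally accounts for the per-chunk header overhead when recalculating the cache charge.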
_I ran more myshadow tests with the latest commit and finished several myshadow A/B tests; the results are promising. For the configuration of a 4GB primary cache and a 3GB secondary cache, the jemalloc resident stat consistently shows ~0.15GB of memory savings, and the allocated and active stats show similar savings. CPU usage is almost the same before and after this PR._
To evaluate the memory fragmentation issue and the benefits of this PR, I conducted the following two sets of local tests.
**T1**
Keys: 16 bytes each (+ 0 bytes user-defined timestamp)
Values: 100 bytes each (50 bytes after compression)
Entries: 90000000
RawSize: 9956.4 MB (estimated)
FileSize: 5664.8 MB (estimated)
| Test Name | Primary Cache Size (MB) | Compressed Secondary Cache Size (MB) |
| - | - | - |
| T1_3 | 4000 | 4000 |
| T1_4 | 2000 | 3000 |
Populate the DB:
./db_bench --benchmarks=fillrandom --num=90000000 -db=/mem_fragmentation/db_bench_1
Overwrite it to a stable state:
./db_bench --benchmarks=overwrite --num=90000000 -use_existing_db -db=/mem_fragmentation/db_bench_1
Run read tests with different cache settings:
T1_3:
MALLOC_CONF="prof:true,prof_stats:true" ../rocksdb/db_bench --benchmarks=seekrandom --threads=16 --num=90000000 -use_existing_db --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=4000000000 -compressed_secondary_cache_size=4000000000 -use_compressed_secondary_cache -db=/mem_fragmentation/db_bench_1 --print_malloc_stats=true > ~/temp/mem_frag/20220710/jemalloc_stats_json_T1_3_20220710 -duration=1800 &
T1_4:
MALLOC_CONF="prof:true,prof_stats:true" ../rocksdb/db_bench --benchmarks=seekrandom --threads=16 --num=90000000 -use_existing_db --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=2000000000 -compressed_secondary_cache_size=3000000000 -use_compressed_secondary_cache -db=/mem_fragmentation/db_bench_1 --print_malloc_stats=true > ~/temp/mem_frag/20220710/jemalloc_stats_json_T1_4_20220710 -duration=1800 &
For T1_3 and T1_4, I also ran the tests before and after this PR. The following table shows the important jemalloc stats.
| Test Name | T1_3 | T1_3 after mem defrag | T1_4 | T1_4 after mem defrag |
| - | - | - | - | - |
| allocated (MB) | 8728 | 8076 | 5518 | 5043 |
| available (MB) | 8753 | 8092 | 5536 | 5051 |
| external fragmentation rate | 0.003 | 0.002 | 0.003 | 0.0016 |
| resident (MB) | 8956 | 8365 | 5655 | 5235 |
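(The external fragmentation rate above appears to be derived as (available - allocated) / available; for T1_3, for example, (8753 - 8728) / 8753 ≈ 0.003.)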
**T2**
Keys: 32 bytes each (+ 0 bytes user-defined timestamp)
Values: 256 bytes each (128 bytes after compression)
Entries: 40000000
RawSize: 10986.3 MB (estimated)
FileSize: 6103.5 MB (estimated)
| Test Name | Primary Cache Size (MB) | Compressed Secondary Cache Size (MB) |
| - | - | - |
| T2_3 | 4000 | 4000 |
| T2_4 | 2000 | 3000 |
Create DB (10GB):
./db_bench -benchmarks=fillrandom -use_direct_reads=true -num=40000000 -key_size=32 -value_size=256 -db=/mem_fragmentation/db_bench_2
Overwrite it to a stable state:
./db_bench --benchmarks=overwrite --num=40000000 -use_existing_db -key_size=32 -value_size=256 -db=/mem_fragmentation/db_bench_2
Run read tests with different cache settings:
T2_3:
MALLOC_CONF="prof:true,prof_stats:true" ./db_bench --benchmarks="mixgraph" -use_direct_io_for_flush_and_compaction=true -use_direct_reads=true -cache_size=4000000000 -compressed_secondary_cache_size=4000000000 -use_compressed_secondary_cache -keyrange_dist_a=14.18 -keyrange_dist_b=-2.917 -keyrange_dist_c=0.0164 -keyrange_dist_d=-0.08082 -keyrange_num=30 -value_k=0.2615 -value_sigma=25.45 -iter_k=2.517 -iter_sigma=14.236 -mix_get_ratio=0.85 -mix_put_ratio=0.14 -mix_seek_ratio=0.01 -sine_mix_rate_interval_milliseconds=5000 -sine_a=1000 -sine_b=0.000073 -sine_d=400000 -reads=80000000 -num=40000000 -key_size=32 -value_size=256 -use_existing_db=true -db=/mem_fragmentation/db_bench_2 --print_malloc_stats=true > ~/temp/mem_frag/jemalloc_stats_T2_3 -duration=1800 &
T2_4:
MALLOC_CONF="prof:true,prof_stats:true" ./db_bench --benchmarks="mixgraph" -use_direct_io_for_flush_and_compaction=true -use_direct_reads=true -cache_size=2000000000 -compressed_secondary_cache_size=3000000000 -use_compressed_secondary_cache -keyrange_dist_a=14.18 -keyrange_dist_b=-2.917 -keyrange_dist_c=0.0164 -keyrange_dist_d=-0.08082 -keyrange_num=30 -value_k=0.2615 -value_sigma=25.45 -iter_k=2.517 -iter_sigma=14.236 -mix_get_ratio=0.85 -mix_put_ratio=0.14 -mix_seek_ratio=0.01 -sine_mix_rate_interval_milliseconds=5000 -sine_a=1000 -sine_b=0.000073 -sine_d=400000 -reads=80000000 -num=40000000 -key_size=32 -value_size=256 -use_existing_db=true -db=/mem_fragmentation/db_bench_2 --print_malloc_stats=true > ~/temp/mem_frag/jemalloc_stats_T2_4 -duration=1800 &
For T2_3 and T2_4, I also ran the tests before and after this PR. The following table shows the important jemalloc stats.
| Test Name | T2_3 | T2_3 after mem defrag | T2_4 | T2_4 after mem defrag |
| - | - | - | - | - |
| allocated (MB) | 8425 | 8093 | 5426 | 5149 |
| available (MB) | 8489 | 8138 | 5435 | 5158 |
| external fragmentation rate | 0.008 | 0.0055 | 0.0017 | 0.0017 |
| resident (MB) | 8676 | 8392 | 5541 | 5321 |
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10287
Test Plan: Unit tests.
Reviewed By: anand1976
Differential Revision: D37743362
Pulled By: gitbw95
fbshipit-source-id: 0010c5af08addeacc5ebbc4ffe5be882fb1d38ad
#include <array>
#include <cstddef>
#include <memory>

#include "cache/lru_cache.h"
#include "memory/memory_allocator.h"
#include "rocksdb/secondary_cache.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "util/compression.h"

namespace ROCKSDB_NAMESPACE {

class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
 public:
  CompressedSecondaryCacheResultHandle(void* value, size_t size)
      : value_(value), size_(size) {}
  virtual ~CompressedSecondaryCacheResultHandle() override = default;

  CompressedSecondaryCacheResultHandle(
      const CompressedSecondaryCacheResultHandle&) = delete;
  CompressedSecondaryCacheResultHandle& operator=(
      const CompressedSecondaryCacheResultHandle&) = delete;

  bool IsReady() override { return true; }

  void Wait() override {}

  void* Value() override { return value_; }

  size_t Size() override { return size_; }

 private:
  void* value_;
  size_t size_;
};

// The CompressedSecondaryCache is a concrete implementation of
// rocksdb::SecondaryCache.
//
Avoid recompressing cold block in CompressedSecondaryCache (#10527)
Summary:
**Summary:**
When a block is looked up from the secondary cache for the first time, we just insert a dummy block into the primary cache (charging the actual size of the block) and don't erase the block from the secondary cache; a standalone handle is returned from `Lookup`. Only if the block is hit again do we erase it from the secondary cache and add it to the primary cache.
When a block is evicted from the primary cache to the secondary cache for the first time, we just insert a dummy block (size 0) into the secondary cache. Only when the block is evicted again is it treated as a hot block and inserted into the secondary cache.
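A minimal sketch of this admission idea (illustrative only; `DummyAdmissionFilter` is a made-up name, and the real code tracks dummies inside the caches themselves rather than in a side set):

```cpp
#include <string>
#include <unordered_set>

// Tracks which keys have already been evicted from the primary cache once.
// Only the second eviction stores the (compressed) payload, so blocks that
// are evicted once and never seen again cost almost nothing.
class DummyAdmissionFilter {
 public:
  // Returns true if the block should really be inserted into the secondary
  // cache; returns false (recording a "dummy") on the first eviction.
  bool AdmitOnEviction(const std::string& key) {
    auto it = seen_once_.find(key);
    if (it != seen_once_.end()) {
      seen_once_.erase(it);
      return true;  // second eviction: treat the block as hot and insert it
    }
    seen_once_.insert(key);  // first eviction: only a dummy is remembered
    return false;
  }

 private:
  std::unordered_set<std::string> seen_once_;
};
```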
**Implementation Details**
Add a new LRUHandle state: the handle is never inserted into the LRUCache (neither the hash table nor the LRU list) and does not go through the usual three states. The entry can be freed when `refs` becomes 0 (refs >= 1 && in_cache == false && IS_STANDALONE == true).
The behaviors of `LRUCacheShard::Lookup()` are updated as follows when the secondary cache is a CompressedSecondaryCache (see the sketch after these lists):
1. If a handle is found in primary cache:
1.1. If the handle's value is not nullptr, it is returned immediately.
1.2. If the handle's value is nullptr, this means the handle is a dummy one. For a dummy handle, if it was retrieved from secondary cache, it may still exist in secondary cache.
- 1.2.1. If no valid handle can be `Lookup` from secondary cache, return nullptr.
- 1.2.2. If the handle from secondary cache is valid, erase it from the secondary cache and add it into the primary cache.
2. If a handle is not found in primary cache:
2.1. If no valid handle can be `Lookup` from secondary cache, return nullptr.
2.2. If the handle from secondary cache is valid, insert a dummy block in the primary cache (charging the actual size of the block) and return a standalone handle.
The behaviors of `LRUCacheShard::Promote()` are updated as follows:
1. If `e->sec_handle` has a value, one of the following happens:
1.1. Insert a dummy handle and return a standalone handle to the caller when `secondary_cache_` is a `CompressedSecondaryCache` and `e` is a standalone handle.
1.2. Insert the item into the primary cache and return the handle to the caller.
1.3. Exception handling.
2. If `e->sec_handle` has no value, mark the item as not in cache and charge the cache only for its metadata, which will shortly be released.
The behavior of `CompressedSecondaryCache::Insert()` is updated:
1. If a block is evicted from the primary cache for the first time, a dummy item is inserted.
2. If a dummy item is already present for a block, the block itself is inserted into the secondary cache.
The behavior of `CompressedSecondaryCache::Lookup()` is updated:
1. If a handle is not found or it is a dummy item, nullptr is returned.
2. If `erase_handle` is true, the handle is erased.
The behaviors of `LRUCacheShard::Release()` are adjusted for the standalone handles.
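A toy sketch of the resulting two-tier lookup flow described above (std::unordered_map stands in for both cache tiers; charges, callbacks, and the standalone-handle state are omitted):

```cpp
#include <string>
#include <unordered_map>

// Toy model of the promotion policy: a block is promoted into the primary
// tier only the second time it is served from the secondary tier.
class TwoTierCache {
 public:
  // Returns the block contents, or an empty string on a miss. An empty value
  // stored in primary_ plays the role of a dummy entry.
  std::string Lookup(const std::string& key) {
    auto p = primary_.find(key);
    if (p != primary_.end() && !p->second.empty()) {
      return p->second;  // real entry already in the primary tier (case 1.1)
    }
    auto s = secondary_.find(key);
    if (s == secondary_.end()) {
      return "";  // miss in both tiers (cases 1.2.1 and 2.1)
    }
    std::string value = s->second;
    if (p != primary_.end()) {
      // Dummy already present: second hit, so erase from the secondary tier
      // and promote into the primary tier (case 1.2.2).
      secondary_.erase(s);
      primary_[key] = value;
    } else {
      // First hit from the secondary tier: leave it there and insert a dummy
      // entry; the real code returns a standalone handle here (case 2.2).
      primary_[key] = "";
    }
    return value;
  }

 private:
  std::unordered_map<std::string, std::string> primary_;
  std::unordered_map<std::string, std::string> secondary_;
};
```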
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10527
Test Plan:
1. Stress tests.
2. Unit tests.
3. CPU profiling for db_bench.
Reviewed By: siying
Differential Revision: D38747613
Pulled By: gitbw95
fbshipit-source-id: 74a1eba7e1957c9affb2bd2ae3e0194584fa6eca
// When a block is found by CompressedSecondaryCache::Lookup, we check whether
// there is a dummy block with the same key in the primary cache.
// 1. If the dummy block exists, we erase the block from
// CompressedSecondaryCache and insert it into the primary cache.
// 2. If not, we just insert a dummy block into the primary cache
// (charging the actual size of the block) and do not erase the block from
// CompressedSecondaryCache. A standalone handle is returned to the caller.
//
// When a block is evicted from the primary cache, we check whether
// there is a dummy block with the same key in CompressedSecondaryCache.
// 1. If the dummy block exists, the block is inserted into
// CompressedSecondaryCache.
// 2. If not, we just insert a dummy block (size 0) in CompressedSecondaryCache.
//
// Users can also cast a pointer to CompressedSecondaryCache and call methods on
// it directly, especially custom methods that may be added
// in the future. For example -
// std::unique_ptr<rocksdb::SecondaryCache> cache =
//     NewCompressedSecondaryCache(opts);
// static_cast<CompressedSecondaryCache*>(cache.get())->Erase(key);
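//
// As a rough usage sketch (assuming the LRUCacheOptions::secondary_cache
// field and the NewLRUCache()/NewCompressedSecondaryCache() factories
// declared in rocksdb/cache.h), it can be attached as the second tier of a
// block cache roughly like this:
//   CompressedSecondaryCacheOptions sec_opts;
//   sec_opts.capacity = 64 << 20;
//   LRUCacheOptions lru_opts;
//   lru_opts.capacity = 1 << 30;
//   lru_opts.secondary_cache = NewCompressedSecondaryCache(sec_opts);
//   std::shared_ptr<Cache> block_cache = NewLRUCache(lru_opts);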

class CompressedSecondaryCache : public SecondaryCache {
 public:
  CompressedSecondaryCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      double high_pri_pool_ratio, double low_pri_pool_ratio,
      std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
      bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
      CacheMetadataChargePolicy metadata_charge_policy =
          kDefaultCacheMetadataChargePolicy,
      CompressionType compression_type = CompressionType::kLZ4Compression,
      uint32_t compress_format_version = 2,
      bool enable_custom_split_merge = false);
  virtual ~CompressedSecondaryCache() override;

  const char* Name() const override { return "CompressedSecondaryCache"; }

  Status Insert(const Slice& key, void* value,
                const Cache::CacheItemHelper* helper) override;

  std::unique_ptr<SecondaryCacheResultHandle> Lookup(
      const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
      bool advise_erase, bool& is_in_sec_cache) override;

  bool SupportForceErase() const override { return true; }

  void Erase(const Slice& key) override;

  void WaitAll(std::vector<SecondaryCacheResultHandle*> /*handles*/) override {}

  std::string GetPrintableOptions() const override;

 private:
  friend class CompressedSecondaryCacheTest;
  static constexpr std::array<uint16_t, 8> malloc_bin_sizes_{
      128, 256, 512, 1024, 2048, 4096, 8192, 16384};
  struct CacheValueChunk {
    // TODO try "CacheAllocationPtr next;".
    CacheValueChunk* next;
    size_t size;
    // Beginning of the chunk data (MUST BE THE LAST FIELD IN THIS STRUCT!)
    char data[1];

    void Free() { delete[] reinterpret_cast<char*>(this); }
  };

  // Split value into chunks to better fit into jemalloc bins. The chunks
  // are stored in CacheValueChunk and extra charge is needed for each chunk,
  // so the cache charge is recalculated here.
  CacheValueChunk* SplitValueIntoChunks(const Slice& value,
                                        const CompressionType compression_type,
                                        size_t& charge);

  // After merging chunks, the extra charge for each chunk is removed, so
  // the charge is recalculated.
  CacheAllocationPtr MergeChunksIntoValue(const void* chunks_head,
                                          size_t& charge);

  // An implementation of Cache::DeleterFn.
  static Cache::DeleterFn GetDeletionCallback(bool enable_custom_split_merge);

  std::shared_ptr<Cache> cache_;
  CompressedSecondaryCacheOptions cache_options_;
};

}  // namespace ROCKSDB_NAMESPACE