Mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-26 07:30:54 +00:00
f241d082b6
Summary:

### **Summary:**
When an LRU block cache and a CompressedSecondaryCache are configured together, some data blocks may be cached twice.

**Changes include:**
1. Rename IS_PROMOTED to IS_IN_SECONDARY_CACHE to prevent confusion.
2. Update SecondaryCacheResultHandle and use IsErasedFromSecondaryCache to determine whether the handle has been erased from the secondary cache, so the caller can decide whether to call SetIsInSecondaryCache().
3. Rename LRUSecondaryCache to CompressedSecondaryCache.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9747

Test Plan:

**Test Scripts:**
1. Populate a DB. The on-disk footprint is 482 MB. The data is set to be 50% compressible, so the total decompressed size is expected to be 964 MB.
./db_bench --benchmarks=fillrandom --num=10000000 -db=/db_bench_1

2. Overwrite it to a stable state:
./db_bench --benchmarks=overwrite,stats --num=10000000 -use_existing_db -duration=10 --benchmark_write_rate_limit=2000000 -db=/db_bench_1

3. Run read tests with different cache settings:

T1: ./db_bench --benchmarks=seekrandom,stats --threads=16 --num=10000000 -use_existing_db -duration=120 --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=520000000 --statistics -db=/db_bench_1

T2: ./db_bench --benchmarks=seekrandom,stats --threads=16 --num=10000000 -use_existing_db -duration=120 --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=320000000 -compressed_secondary_cache_size=400000000 --statistics -use_compressed_secondary_cache -db=/db_bench_1

T3: ./db_bench --benchmarks=seekrandom,stats --threads=16 --num=10000000 -use_existing_db -duration=120 --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=520000000 -compressed_secondary_cache_size=400000000 --statistics -use_compressed_secondary_cache -db=/db_bench_1

T4: ./db_bench --benchmarks=seekrandom,stats --threads=16 --num=10000000 -use_existing_db -duration=120 --benchmark_write_rate_limit=52000000 -use_direct_reads --cache_size=20000000 -compressed_secondary_cache_size=500000000 --statistics -use_compressed_secondary_cache -db=/db_bench_1

**Before this PR**
| Cache Size | Compressed Secondary Cache Size | Cache Hit Rate |
|------------|---------------------------------|----------------|
| 520 MB     | 0 MB                            | 85.5%          |
| 320 MB     | 400 MB                          | 96.2%          |
| 520 MB     | 400 MB                          | 98.3%          |
| 20 MB      | 500 MB                          | 98.8%          |

**After this PR**
| Cache Size | Compressed Secondary Cache Size | Cache Hit Rate |
|------------|---------------------------------|----------------|
| 520 MB     | 0 MB                            | 85.5%          |
| 320 MB     | 400 MB                          | 99.9%          |
| 520 MB     | 400 MB                          | 99.9%          |
| 20 MB      | 500 MB                          | 99.2%          |

Reviewed By: anand1976

Differential Revision: D35117499

Pulled By: gitbw95

fbshipit-source-id: ea2657749fc13efebe91a8a1b56bc61d6a224a12
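For orientation, here is a minimal sketch (not part of the PR) of how an application wires the two cache tiers from the test plan together. It assumes the public `CompressedSecondaryCacheOptions` and `NewCompressedSecondaryCache()` declarations in `include/rocksdb/cache.h` introduced by this rename, that LZ4 support is compiled in, and uses illustrative capacities and an illustrative DB path:

```cpp
#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  // Compressed (secondary) tier, roughly the 400 MB tier used in T2/T3 above.
  rocksdb::CompressedSecondaryCacheOptions sec_opts;
  sec_opts.capacity = 400 << 20;
  sec_opts.compression_type = rocksdb::kLZ4Compression;
  std::shared_ptr<rocksdb::SecondaryCache> sec_cache =
      rocksdb::NewCompressedSecondaryCache(sec_opts);

  // Uncompressed (primary) LRU block cache with the secondary cache attached.
  rocksdb::LRUCacheOptions lru_opts;
  lru_opts.capacity = 320 << 20;
  lru_opts.secondary_cache = sec_cache;

  rocksdb::BlockBasedTableOptions table_opts;
  table_opts.block_cache = rocksdb::NewLRUCache(lru_opts);

  rocksdb::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_opts));

  // Illustrative path only.
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/sec_cache_demo", &db);
  if (!s.ok()) {
    return 1;
  }
  delete db;
  return 0;
}
```

With this layout, blocks evicted from the primary LRU tier are offered to the compressed secondary tier, and on a later lookup they are decompressed and promoted back into the primary tier; that promotion path is what this PR de-duplicates.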
172 lines
6.3 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "cache/compressed_secondary_cache.h"

#include <memory>

#include "memory/memory_allocator.h"
#include "util/compression.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// Deleter registered with the internal LRUCache; it frees the heap-allocated
// CacheAllocationPtr wrapper that owns the stored (possibly compressed) block.
void DeletionCallback(const Slice& /*key*/, void* obj) {
  delete reinterpret_cast<CacheAllocationPtr*>(obj);
  obj = nullptr;
}

}  // namespace

CompressedSecondaryCache::CompressedSecondaryCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    double high_pri_pool_ratio,
    std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
    CacheMetadataChargePolicy metadata_charge_policy,
    CompressionType compression_type, uint32_t compress_format_version)
    : cache_options_(capacity, num_shard_bits, strict_capacity_limit,
                     high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
                     metadata_charge_policy, compression_type,
                     compress_format_version) {
  cache_ = NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
                       high_pri_pool_ratio, memory_allocator,
                       use_adaptive_mutex, metadata_charge_policy);
}

CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }

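// Lookup: retrieve the (possibly compressed) block for `key` from the internal
// LRUCache, decompress it when a compression type is configured, and pass the
// bytes to the caller's create callback. The LRU handle is released with
// erase_if_last_ref = true, so once a block is handed back to the primary
// cache it is (absent other references) erased from this secondary cache,
// avoiding the double caching described in the summary.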
std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
    const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
    bool& is_in_sec_cache) {
  std::unique_ptr<SecondaryCacheResultHandle> handle;
  is_in_sec_cache = false;
  Cache::Handle* lru_handle = cache_->Lookup(key);
  if (lru_handle == nullptr) {
    return handle;
  }

  CacheAllocationPtr* ptr =
      reinterpret_cast<CacheAllocationPtr*>(cache_->Value(lru_handle));
  void* value = nullptr;
  size_t charge = 0;
  Status s;

  if (cache_options_.compression_type == kNoCompression) {
    s = create_cb(ptr->get(), cache_->GetCharge(lru_handle), &value, &charge);
  } else {
    UncompressionContext uncompression_context(cache_options_.compression_type);
    UncompressionInfo uncompression_info(uncompression_context,
                                         UncompressionDict::GetEmptyDict(),
                                         cache_options_.compression_type);

    size_t uncompressed_size = 0;
    CacheAllocationPtr uncompressed;
    uncompressed = UncompressData(
        uncompression_info, (char*)ptr->get(), cache_->GetCharge(lru_handle),
        &uncompressed_size, cache_options_.compress_format_version,
        cache_options_.memory_allocator.get());

    if (!uncompressed) {
      cache_->Release(lru_handle, /* erase_if_last_ref */ true);
      return handle;
    }
    s = create_cb(uncompressed.get(), uncompressed_size, &value, &charge);
  }

  if (!s.ok()) {
    cache_->Release(lru_handle, /* erase_if_last_ref */ true);
    return handle;
  }

  cache_->Release(lru_handle, /* erase_if_last_ref */ true);
  handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));

  return handle;
}

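// Insert: serialize the value through the caller-provided helper callbacks,
// optionally compress the serialized bytes, and store an owning
// CacheAllocationPtr in the internal LRUCache under the same key.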
Status CompressedSecondaryCache::Insert(const Slice& key, void* value,
                                        const Cache::CacheItemHelper* helper) {
  size_t size = (*helper->size_cb)(value);
  CacheAllocationPtr ptr =
      AllocateBlock(size, cache_options_.memory_allocator.get());

  Status s = (*helper->saveto_cb)(value, 0, size, ptr.get());
  if (!s.ok()) {
    return s;
  }
  Slice val(ptr.get(), size);

  std::string compressed_val;
  if (cache_options_.compression_type != kNoCompression) {
    CompressionOptions compression_opts;
    CompressionContext compression_context(cache_options_.compression_type);
    uint64_t sample_for_compression = 0;
    CompressionInfo compression_info(
        compression_opts, compression_context, CompressionDict::GetEmptyDict(),
        cache_options_.compression_type, sample_for_compression);

    bool success =
        CompressData(val, compression_info,
                     cache_options_.compress_format_version, &compressed_val);

    if (!success) {
      return Status::Corruption("Error compressing value.");
    }

    val = Slice(compressed_val);
    size = compressed_val.size();
    ptr = AllocateBlock(size, cache_options_.memory_allocator.get());
    memcpy(ptr.get(), compressed_val.data(), size);
  }

  CacheAllocationPtr* buf = new CacheAllocationPtr(std::move(ptr));

  return cache_->Insert(key, buf, size, DeletionCallback);
}

void CompressedSecondaryCache::Erase(const Slice& key) { cache_->Erase(key); }

std::string CompressedSecondaryCache::GetPrintableOptions() const {
  std::string ret;
  ret.reserve(20000);
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  ret.append(cache_->GetPrintableOptions());
  snprintf(buffer, kBufferSize, "    compression_type : %s\n",
           CompressionTypeToString(cache_options_.compression_type).c_str());
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "    compress_format_version : %d\n",
           cache_options_.compress_format_version);
  ret.append(buffer);
  return ret;
}

std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    double high_pri_pool_ratio,
    std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
    CacheMetadataChargePolicy metadata_charge_policy,
    CompressionType compression_type, uint32_t compress_format_version) {
  return std::make_shared<CompressedSecondaryCache>(
      capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
      memory_allocator, use_adaptive_mutex, metadata_charge_policy,
      compression_type, compress_format_version);
}

std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
    const CompressedSecondaryCacheOptions& opts) {
  // The secondary_cache is disabled for this LRUCache instance.
  assert(opts.secondary_cache == nullptr);
  return NewCompressedSecondaryCache(
      opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
      opts.high_pri_pool_ratio, opts.memory_allocator, opts.use_adaptive_mutex,
      opts.metadata_charge_policy, opts.compression_type,
      opts.compress_format_version);
}

} // namespace ROCKSDB_NAMESPACE