mirror of https://github.com/facebook/rocksdb.git
Summary:
This reverts commit 0d885e80d4. The original commit causes an ASAN stack-use-after-return failure because the `CreateCallback` is allocated on the stack and then used in another thread when a secondary cache object is promoted to the primary cache.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10541
Reviewed By: gitbw95
Differential Revision: D38850039
Pulled By: anand1976
fbshipit-source-id: 810c592b7de2523693f5bb267159b23b0ee9132c
This commit is contained in:
parent 13cb7a84b6
commit 2553d1efa1
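The diff below restores the lambda-based `GetCreateCallback`. To illustrate the failure being reverted, here is a minimal, self-contained sketch (hypothetical names, not RocksDB code) of why wrapping a `std::ref` to a stack-allocated functor in a `std::function` breaks once the callback escapes its frame:

```cpp
// Intentionally demonstrates the reported failure mode; hypothetical names only.
#include <functional>
#include <thread>

struct Parser {
  int scale = 2;
  int operator()(int x) const { return scale * x; }
};

std::function<int(int)> MakeCallback() {
  Parser parser;                                      // lives on this stack frame
  return std::function<int(int)>(std::ref(parser));   // stores only a reference
}                                                     // parser is destroyed here

int main() {
  std::function<int(int)> cb = MakeCallback();
  // By the time another thread runs the callback (as in asynchronous promotion
  // from a secondary cache), the referenced Parser is gone, so the call below
  // touches a dead stack slot.
  std::thread worker([cb] { (void)cb(7); });
  worker.join();
  return 0;
}
```

Built with `-fsanitize=address`, this is the kind of stack-use-after-return report described in the summary.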
@@ -41,7 +41,6 @@

 ### Performance Improvements
 * Instead of constructing `FragmentedRangeTombstoneList` during every read operation, it is now constructed once and stored in immutable memtables. This improves speed of querying range tombstones from immutable memtables.
-* Improve read performance by avoiding dynamic memory allocation.
 * When using iterators with the integrated BlobDB implementation, blob cache handles are now released immediately when the iterator's position changes.

 ## Behavior Change
@@ -1251,12 +1251,8 @@ Status BlockBasedTable::GetDataBlockFromCache(
   Statistics* statistics = rep_->ioptions.statistics.get();
   bool using_zstd = rep_->blocks_definitely_zstd_compressed;
   const FilterPolicy* filter_policy = rep_->filter_policy;
-  CacheCreateCallback<TBlocklike> callback(read_amp_bytes_per_bit, statistics,
-                                           using_zstd, filter_policy);
-  // avoid dynamic memory allocation by using the reference (std::ref) of the
-  // callback. Otherwise, binding a functor to std::function will allocate extra
-  // memory from heap.
-  Cache::CreateCallback create_cb(std::ref(callback));
+  Cache::CreateCallback create_cb = GetCreateCallback<TBlocklike>(
+      read_amp_bytes_per_bit, statistics, using_zstd, filter_policy);

   // Lookup uncompressed cache first
   if (block_cache != nullptr) {
@@ -1286,11 +1282,8 @@ Status BlockBasedTable::GetDataBlockFromCache(
     BlockContents contents;
     if (rep_->ioptions.lowest_used_cache_tier ==
         CacheTier::kNonVolatileBlockTier) {
-      CacheCreateCallback<BlockContents> special_callback(
+      Cache::CreateCallback create_cb_special = GetCreateCallback<BlockContents>(
           read_amp_bytes_per_bit, statistics, using_zstd, filter_policy);
-      // avoid dynamic memory allocation by using the reference (std::ref) of the
-      // callback. Make sure the callback is only used within this code block.
-      Cache::CreateCallback create_cb_special(std::ref(special_callback));
       block_cache_compressed_handle = block_cache_compressed->Lookup(
           cache_key,
           BlocklikeTraits<BlockContents>::GetCacheItemHelper(block_type),
@@ -21,42 +21,24 @@ template <typename T, CacheEntryRole R>
 Cache::CacheItemHelper* GetCacheItemHelperForRole();

 template <typename TBlocklike>
-class CacheCreateCallback {
- public:
-  CacheCreateCallback() = delete;
-  CacheCreateCallback(const CacheCreateCallback&) = delete;
-  CacheCreateCallback(CacheCreateCallback&&) = delete;
-  CacheCreateCallback& operator=(const CacheCreateCallback&) = delete;
-  CacheCreateCallback& operator=(CacheCreateCallback&&) = delete;
-
-  explicit CacheCreateCallback(size_t read_amp_bytes_per_bit,
-                               Statistics* statistics, bool using_zstd,
-                               const FilterPolicy* filter_policy)
-      : read_amp_bytes_per_bit_(read_amp_bytes_per_bit),
-        statistics_(statistics),
-        using_zstd_(using_zstd),
-        filter_policy_(filter_policy) {}
-
-  Status operator()(const void* buf, size_t size, void** out_obj,
-                    size_t* charge) {
+Cache::CreateCallback GetCreateCallback(size_t read_amp_bytes_per_bit,
+                                        Statistics* statistics, bool using_zstd,
+                                        const FilterPolicy* filter_policy) {
+  return [read_amp_bytes_per_bit, statistics, using_zstd, filter_policy](
+             const void* buf, size_t size, void** out_obj,
+             size_t* charge) -> Status {
     assert(buf != nullptr);
     std::unique_ptr<char[]> buf_data(new char[size]());
     memcpy(buf_data.get(), buf, size);
     BlockContents bc = BlockContents(std::move(buf_data), size);
     TBlocklike* ucd_ptr = BlocklikeTraits<TBlocklike>::Create(
-        std::move(bc), read_amp_bytes_per_bit_, statistics_, using_zstd_,
-        filter_policy_);
+        std::move(bc), read_amp_bytes_per_bit, statistics, using_zstd,
+        filter_policy);
     *out_obj = reinterpret_cast<void*>(ucd_ptr);
     *charge = size;
     return Status::OK();
-  }
-
- private:
-  const size_t read_amp_bytes_per_bit_;
-  Statistics* statistics_;
-  const bool using_zstd_;
-  const FilterPolicy* filter_policy_;
-};
+  };
+}

 template <>
 class BlocklikeTraits<BlockContents> {
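For contrast, a sketch of the pattern the revert restores (again with hypothetical names, not the actual RocksDB signatures): a factory returns a lambda that captures its parameters by value, so the resulting `std::function` owns everything it needs and stays valid regardless of which thread invokes it or when.

```cpp
// Sketch only: by-value capture keeps the callback self-contained.
#include <functional>
#include <thread>

std::function<int(int)> MakeCreateCallback(int read_amp_bytes_per_bit,
                                           bool using_zstd) {
  return [read_amp_bytes_per_bit, using_zstd](int x) -> int {
    // The captured copies live inside the std::function, not on the caller's
    // stack, so there is nothing to dangle after MakeCreateCallback returns.
    return using_zstd ? x + read_amp_bytes_per_bit : x;
  };
}

int main() {
  std::function<int(int)> create_cb = MakeCreateCallback(8, true);
  // Safe to hand off to another thread that runs later.
  std::thread worker([create_cb] { (void)create_cb(42); });
  worker.join();
  return 0;
}
```

The trade-off is that wrapping a capturing lambda in `std::function` may heap-allocate the captured state, which is the cost the reverted commit was trying to avoid.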