Fix data race in accessing `cached_range_tombstone_` (#10680)
Summary:
Fix a data race introduced in https://github.com/facebook/rocksdb/issues/10547 (P5295241720), first reported by pdillinger. The race is between the `std::atomic_load_explicit` in NewRangeTombstoneIteratorInternal and the `std::atomic_store_explicit` in MemTable::Add() that operate on `cached_range_tombstone_`. P5295241720 shows that `atomic_store_explicit` initializes some mutex which `atomic_load_explicit` could be trying to call `lock()` on at the same time. This fix moves the initialization into the MemTable constructor.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10680

Test Plan: `USE_CLANG=1 COMPILE_WITH_TSAN=1 make -j24 whitebox_crash_test`

Reviewed By: ajkr

Differential Revision: D39528696

Pulled By: cbi42

fbshipit-source-id: ee740841044438e18ad2b8ea567444dd542dd8e2
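For context, the pattern the fix relies on can be shown with a minimal, self-contained sketch. This is not RocksDB code: `PerCoreCache`, `TombstoneCache`, `Read`, and `Invalidate` are illustrative stand-ins. The point it demonstrates is the one in the commit message: if every per-core shared_ptr slot is seeded once in the constructor, a reader's `std::atomic_load_explicit` can never overlap with the very first `std::atomic_store_explicit` issued lazily from the write path.

// Minimal sketch, assuming a simplified per-core cache; not the actual
// MemTable implementation. Uses the same pre-C++20 atomic free functions
// for shared_ptr that the patch uses.
#include <atomic>   // std::memory_order_relaxed
#include <memory>   // std::shared_ptr and its atomic free functions
#include <thread>
#include <vector>

struct TombstoneCache {};  // stand-in for FragmentedRangeTombstoneListCache

class PerCoreCache {
 public:
  explicit PerCoreCache(size_t num_slots) : slots_(num_slots) {
    // Mirrors what the commit moves into the MemTable constructor:
    // publish an initial cache object into every slot up front.
    auto initial = std::make_shared<TombstoneCache>();
    for (auto& slot : slots_) {
      std::atomic_store_explicit(&slot, initial, std::memory_order_relaxed);
    }
  }

  // Reader side (analogous to NewRangeTombstoneIteratorInternal): safe to
  // run concurrently with Invalidate() because no slot is ever uninitialized.
  std::shared_ptr<TombstoneCache> Read(size_t core) const {
    return std::atomic_load_explicit(&slots_[core], std::memory_order_relaxed);
  }

  // Writer side (analogous to MemTable::Add() invalidating the cache after a
  // new range tombstone is written).
  void Invalidate() {
    auto fresh = std::make_shared<TombstoneCache>();
    for (auto& slot : slots_) {
      std::atomic_store_explicit(&slot, fresh, std::memory_order_relaxed);
    }
  }

 private:
  std::vector<std::shared_ptr<TombstoneCache>> slots_;
};

int main() {
  PerCoreCache cache(4);
  std::thread reader([&] { auto c = cache.Read(0); (void)c; });
  std::thread writer([&] { cache.Invalidate(); });
  reader.join();
  writer.join();
  return 0;
}

The real patch additionally publishes the new cache through shared_ptr's aliasing constructor so every core-local slot shares one underlying cache object; the sketch leaves that detail out to keep the initialization-order point visible.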
parent 832fd644fc
commit be04a3b6cd
db/memtable.cc
@@ -127,6 +127,22 @@ MemTable::MemTable(const InternalKeyComparator& cmp,
                        6 /* hard coded 6 probes */,
                        moptions_.memtable_huge_page_size, ioptions.logger));
   }
+  // Initialize cached_range_tombstone_ here since it could
+  // be read before it is constructed in MemTable::Add(), which could also lead
+  // to a data race on the global mutex table backing atomic shared_ptr.
+  auto new_cache = std::make_shared<FragmentedRangeTombstoneListCache>();
+  size_t size = cached_range_tombstone_.Size();
+  for (size_t i = 0; i < size; ++i) {
+    std::shared_ptr<FragmentedRangeTombstoneListCache>* local_cache_ref_ptr =
+        cached_range_tombstone_.AccessAtCore(i);
+    auto new_local_cache_ref = std::make_shared<
+        const std::shared_ptr<FragmentedRangeTombstoneListCache>>(new_cache);
+    std::atomic_store_explicit(
+        local_cache_ref_ptr,
+        std::shared_ptr<FragmentedRangeTombstoneListCache>(new_local_cache_ref,
+                                                           new_cache.get()),
+        std::memory_order_relaxed);
+  }
 }
 
 MemTable::~MemTable() {
db/memtable.h
@@ -536,11 +536,6 @@ class MemTable {
                      size_t protection_bytes_per_key,
                      bool allow_data_in_errors = false);
 
-  // makes sure there is a single range tombstone writer to invalidate cache
-  std::mutex range_del_mutex_;
-  CoreLocalArray<std::shared_ptr<FragmentedRangeTombstoneListCache>>
-      cached_range_tombstone_;
-
  private:
   enum FlushStateEnum { FLUSH_NOT_REQUESTED, FLUSH_REQUESTED, FLUSH_SCHEDULED };
 
@@ -654,6 +649,11 @@ class MemTable {
   std::unique_ptr<FragmentedRangeTombstoneList>
       fragmented_range_tombstone_list_;
 
+  // makes sure there is a single range tombstone writer to invalidate cache
+  std::mutex range_del_mutex_;
+  CoreLocalArray<std::shared_ptr<FragmentedRangeTombstoneListCache>>
+      cached_range_tombstone_;
+
   void UpdateEntryChecksum(const ProtectionInfoKVOS64* kv_prot_info,
                            const Slice& key, const Slice& value, ValueType type,
                            SequenceNumber s, char* checksum_ptr);