diff --git a/db/memtable.cc b/db/memtable.cc
index 2acf9b6da0..9f12f4130d 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -127,6 +127,22 @@ MemTable::MemTable(const InternalKeyComparator& cmp,
                          6 /* hard coded 6 probes */,
                          moptions_.memtable_huge_page_size, ioptions.logger));
   }
+  // Initialize cached_range_tombstone_ here since it could
+  // be read before it is constructed in MemTable::Add(), which could also lead
+  // to a data race on the global mutex table backing atomic shared_ptr.
+  auto new_cache = std::make_shared<FragmentedRangeTombstoneListCache>();
+  size_t size = cached_range_tombstone_.Size();
+  for (size_t i = 0; i < size; ++i) {
+    std::shared_ptr<FragmentedRangeTombstoneListCache>* local_cache_ref_ptr =
+        cached_range_tombstone_.AccessAtCore(i);
+    auto new_local_cache_ref = std::make_shared<
+        const std::shared_ptr<FragmentedRangeTombstoneListCache>>(new_cache);
+    std::atomic_store_explicit(
+        local_cache_ref_ptr,
+        std::shared_ptr<FragmentedRangeTombstoneListCache>(new_local_cache_ref,
+                                                           new_cache.get()),
+        std::memory_order_relaxed);
+  }
 }
 
 MemTable::~MemTable() {
diff --git a/db/memtable.h b/db/memtable.h
index 86c2160af9..141100228a 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -536,11 +536,6 @@ class MemTable {
                                     size_t protection_bytes_per_key,
                                     bool allow_data_in_errors = false);
 
-  // makes sure there is a single range tombstone writer to invalidate cache
-  std::mutex range_del_mutex_;
-  CoreLocalArray<std::shared_ptr<FragmentedRangeTombstoneListCache>>
-      cached_range_tombstone_;
-
  private:
   enum FlushStateEnum { FLUSH_NOT_REQUESTED, FLUSH_REQUESTED, FLUSH_SCHEDULED };
 
@@ -654,6 +649,11 @@ class MemTable {
   std::unique_ptr<FragmentedRangeTombstoneList>
       fragmented_range_tombstone_list_;
 
+  // makes sure there is a single range tombstone writer to invalidate cache
+  std::mutex range_del_mutex_;
+  CoreLocalArray<std::shared_ptr<FragmentedRangeTombstoneListCache>>
+      cached_range_tombstone_;
+
   void UpdateEntryChecksum(const ProtectionInfoKVOS64* kv_prot_info,
                            const Slice& key, const Slice& value, ValueType type,
                            SequenceNumber s, char* checksum_ptr);
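
For reference, a minimal self-contained sketch of the publication pattern the new constructor code relies on: each per-core slot holds a `std::shared_ptr` that is written with the pre-C++20 atomic `shared_ptr` free functions, and the aliasing constructor makes every slot share ownership of one cache object through an outer `shared_ptr<const shared_ptr<...>>` handle. This is not RocksDB code; `DummyCache`, `kNumSlots`, and the plain `std::vector` are illustrative stand-ins for `FragmentedRangeTombstoneListCache` and `CoreLocalArray`.

```cpp
// Sketch only: DummyCache, kNumSlots and std::vector stand in for RocksDB's
// FragmentedRangeTombstoneListCache and CoreLocalArray.
#include <atomic>
#include <cstddef>
#include <memory>
#include <vector>

struct DummyCache {};  // placeholder for the per-memtable tombstone cache

int main() {
  constexpr size_t kNumSlots = 4;  // one slot per core in the real code
  std::vector<std::shared_ptr<DummyCache>> slots(kNumSlots);

  // One cache object shared by every slot, as in the MemTable constructor.
  auto new_cache = std::make_shared<DummyCache>();

  for (size_t i = 0; i < kNumSlots; ++i) {
    // Wrap the cache in an outer const shared_ptr; copies of that handle are
    // what keep the cache alive for readers after a writer swaps in a new one.
    auto ref = std::make_shared<const std::shared_ptr<DummyCache>>(new_cache);

    // Aliasing constructor: the stored pointer is the raw cache, but ownership
    // follows the outer ref. atomic_store_explicit is the free-function API
    // for racing reads/writes on a shared_ptr slot (deprecated in C++20 in
    // favor of std::atomic<std::shared_ptr<T>>, but what the diff uses).
    std::atomic_store_explicit(
        &slots[i], std::shared_ptr<DummyCache>(ref, new_cache.get()),
        std::memory_order_relaxed);
  }

  // A reader would use the matching atomic load on its core-local slot.
  std::shared_ptr<DummyCache> local =
      std::atomic_load_explicit(&slots[0], std::memory_order_acquire);
  (void)local;
  return 0;
}
```

Doing this publication once in the constructor, rather than lazily, is what the patch is about: readers can touch the slots as soon as the MemTable exists, so they never race with a first-time initialization in `MemTable::Add()`.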