Mirror of https://github.com/facebook/rocksdb.git
65cde19f40
Summary: See the new atomic.h file comments for motivation. I have updated HyperClockCache to use the new atomic wrappers, fixing a few cases where an implicit conversion was accidentally used, thereby mixing std::memory_order_seq_cst with the release/acquire ordering (or relaxed) that was intended. There probably wasn't a real bug, because I think all the cases happened to be in single-threaded contexts like constructors/destructors, or in statistical ops like `GetCapacity()` that don't need any particular ordering constraints.

Recommended follow-up:
* Replace other uses of std::atomic to help keep them safe from bugs.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12051

Test Plan: Did some local correctness stress testing with cache_bench. Also triggered 15 runs of fbcode_blackbox_crash_test and saw no related failures (just 3 failures in ~CacheWithSecondaryAdapter(), already known).

No performance difference seen between before & after builds, run simultaneously:

```
(while ./cache_bench -cache_type=fixed_hyper_clock_cache -populate_cache=0 -cache_size=3000000000 -ops_per_thread=500000 -threads=12 -histograms=0 2>&1 | grep parallel; do :; done) | awk '{ s += $3; c++; print "Avg time: " (s/c);}'
```

... for both fixed_hcc and auto_hcc.

Reviewed By: jowlyzhang

Differential Revision: D51090518

Pulled By: pdillinger

fbshipit-source-id: eeb324facb3185584603f9ea0c4de6f32919a2d7
112 lines
4.1 KiB
C++
// Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <atomic>

#include "rocksdb/rocksdb_namespace.h"

namespace ROCKSDB_NAMESPACE {

// Background:
// std::atomic is somewhat easy to misuse:
// * Implicit conversion to T using std::memory_order_seq_cst, along with
// memory order parameter defaults, makes it easy to accidentally mix
// sequential consistency ordering with acquire/release memory ordering. See
// "The single total order might not be consistent with happens-before" at
// https://en.cppreference.com/w/cpp/atomic/memory_order
// * It's easy to use nonsensical (UB) combinations like a store with
// std::memory_order_acquire.
// For such reasons, we provide wrappers below to make safe usage easier.

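// To make the hazards concrete, a hypothetical snippet (not from this
// codebase) in which both lines compile but neither does what was likely
// intended:
//
//   std::atomic<int> a{0};
//   int x = a;                              // implicit seq_cst load
//   a.store(1, std::memory_order_acquire);  // UB: acquire is not a valid
//                                           // ordering for a store
//
// The wrappers below encode the intended ordering in the method name and
// expose only valid combinations.
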
// Wrapper around std::atomic to avoid certain bugs (see Background above).
//
// This relaxed-only wrapper is intended for atomics that do not need
// ordering constraints with other data reads/writes aside from those
// necessary for computing data values or given by other happens-before
// relationships. For example, a cross-thread counter that never returns
// the same result can be a RelaxedAtomic.
template <typename T>
class RelaxedAtomic {
 public:
  explicit RelaxedAtomic(T initial = {}) : v_(initial) {}
  void StoreRelaxed(T desired) { v_.store(desired, std::memory_order_relaxed); }
  T LoadRelaxed() const { return v_.load(std::memory_order_relaxed); }
  bool CasWeakRelaxed(T& expected, T desired) {
    return v_.compare_exchange_weak(expected, desired,
                                    std::memory_order_relaxed);
  }
  bool CasStrongRelaxed(T& expected, T desired) {
    return v_.compare_exchange_strong(expected, desired,
                                      std::memory_order_relaxed);
  }
  T ExchangeRelaxed(T desired) {
    return v_.exchange(desired, std::memory_order_relaxed);
  }
  T FetchAddRelaxed(T operand) {
    return v_.fetch_add(operand, std::memory_order_relaxed);
  }
  T FetchSubRelaxed(T operand) {
    return v_.fetch_sub(operand, std::memory_order_relaxed);
  }
  T FetchAndRelaxed(T operand) {
    return v_.fetch_and(operand, std::memory_order_relaxed);
  }
  T FetchOrRelaxed(T operand) {
    return v_.fetch_or(operand, std::memory_order_relaxed);
  }
  T FetchXorRelaxed(T operand) {
    return v_.fetch_xor(operand, std::memory_order_relaxed);
  }

 protected:
  std::atomic<T> v_;
};

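// Example (hypothetical usage sketch): a statistics counter where only the
// counter value itself matters and no other memory is published through it,
// so relaxed ordering suffices:
//
//   RelaxedAtomic<uint64_t> hit_count{0};
//
//   // Any thread:
//   hit_count.FetchAddRelaxed(1);
//
//   // Reporting thread (value may lag, but reads are never torn):
//   uint64_t hits = hit_count.LoadRelaxed();
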
// Wrapper around std::atomic to avoid certain bugs (see Background above).
//
// Except for some unusual cases requiring sequential consistency, this is
// a general-purpose atomic. Relaxed operations can be mixed in as appropriate.
template <typename T>
class AcqRelAtomic : public RelaxedAtomic<T> {
 public:
  explicit AcqRelAtomic(T initial = {}) : RelaxedAtomic<T>(initial) {}
  void Store(T desired) {
    RelaxedAtomic<T>::v_.store(desired, std::memory_order_release);
  }
  T Load() const {
    return RelaxedAtomic<T>::v_.load(std::memory_order_acquire);
  }
  bool CasWeak(T& expected, T desired) {
    return RelaxedAtomic<T>::v_.compare_exchange_weak(
        expected, desired, std::memory_order_acq_rel);
  }
  bool CasStrong(T& expected, T desired) {
    return RelaxedAtomic<T>::v_.compare_exchange_strong(
        expected, desired, std::memory_order_acq_rel);
  }
  T Exchange(T desired) {
    return RelaxedAtomic<T>::v_.exchange(desired, std::memory_order_acq_rel);
  }
  T FetchAdd(T operand) {
    return RelaxedAtomic<T>::v_.fetch_add(operand, std::memory_order_acq_rel);
  }
  T FetchSub(T operand) {
    return RelaxedAtomic<T>::v_.fetch_sub(operand, std::memory_order_acq_rel);
  }
  T FetchAnd(T operand) {
    return RelaxedAtomic<T>::v_.fetch_and(operand, std::memory_order_acq_rel);
  }
  T FetchOr(T operand) {
    return RelaxedAtomic<T>::v_.fetch_or(operand, std::memory_order_acq_rel);
  }
  T FetchXor(T operand) {
    return RelaxedAtomic<T>::v_.fetch_xor(operand, std::memory_order_acq_rel);
  }
};

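// Example (hypothetical usage sketch): publishing plain data to another
// thread. The release semantics of Store() pair with the acquire semantics
// of Load(), so a reader that observes the flag as true is guaranteed to
// see the preceding write to `payload`:
//
//   int payload = 0;  // plain (non-atomic) data
//   AcqRelAtomic<bool> published{false};
//
//   // Writer thread:
//   payload = 42;
//   published.Store(true);   // release: orders the payload write before it
//
//   // Reader thread:
//   if (published.Load()) {  // acquire: pairs with the release store
//     assert(payload == 42);
//   }
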
}  // namespace ROCKSDB_NAMESPACE