Clean up some FastRange calls (#11707)

Summary:
* JemallocNodumpAllocator was passing a size_t to FastRange32, which could cause compilation errors or warnings (seen with clang)
* Fixed the order of arguments to match what would be used with modulo operator (%), for clarity.

Fixes https://github.com/facebook/rocksdb/issues/11006

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11707

Test Plan: no functional change, existing tests

Reviewed By: ajkr

Differential Revision: D48435149

Pulled By: pdillinger

fbshipit-source-id: e6e8b107ded4eceda37db20df59985c846a2546b
This commit is contained in:
Peter Dillinger 2023-08-17 11:52:38 -07:00 committed by Facebook GitHub Bot
parent d1ff401472
commit 966be1cc4e
3 changed files with 9 additions and 8 deletions

View File

@@ -124,7 +124,8 @@ uint32_t JemallocNodumpAllocator::GetArenaIndex() const {
// to make Random thread-safe and prevent cacheline bouncing. Whether this is
// worthwhile is still an open question.
thread_local Random tl_random(next_seed.fetch_add(1));
return arena_indexes_[FastRange32(tl_random.Next(), arena_indexes_.size())];
return arena_indexes_[FastRange32(
tl_random.Next(), static_cast<uint32_t>(arena_indexes_.size()))];
}
Status JemallocNodumpAllocator::InitializeArenas() {

View File

@@ -199,7 +199,7 @@ class FastLocalBloomImpl {
static inline void AddHash(uint32_t h1, uint32_t h2, uint32_t len_bytes,
int num_probes, char *data) {
uint32_t bytes_to_cache_line = FastRange32(len_bytes >> 6, h1) << 6;
uint32_t bytes_to_cache_line = FastRange32(h1, len_bytes >> 6) << 6;
AddHashPrepared(h2, num_probes, data + bytes_to_cache_line);
}
@@ -216,7 +216,7 @@ class FastLocalBloomImpl {
static inline void PrepareHash(uint32_t h1, uint32_t len_bytes,
const char *data,
uint32_t /*out*/ *byte_offset) {
uint32_t bytes_to_cache_line = FastRange32(len_bytes >> 6, h1) << 6;
uint32_t bytes_to_cache_line = FastRange32(h1, len_bytes >> 6) << 6;
PREFETCH(data + bytes_to_cache_line, 0 /* rw */, 1 /* locality */);
PREFETCH(data + bytes_to_cache_line + 63, 0 /* rw */, 1 /* locality */);
*byte_offset = bytes_to_cache_line;
@@ -224,7 +224,7 @@ class FastLocalBloomImpl {
static inline bool HashMayMatch(uint32_t h1, uint32_t h2, uint32_t len_bytes,
int num_probes, const char *data) {
uint32_t bytes_to_cache_line = FastRange32(len_bytes >> 6, h1) << 6;
uint32_t bytes_to_cache_line = FastRange32(h1, len_bytes >> 6) << 6;
return HashMayMatchPrepared(h2, num_probes, data + bytes_to_cache_line);
}

View File

@@ -126,7 +126,7 @@ inline void DynamicBloom::MayContain(int num_keys, Slice* keys,
std::array<size_t, MultiGetContext::MAX_BATCH_SIZE> byte_offsets;
for (int i = 0; i < num_keys; ++i) {
hashes[i] = BloomHash(keys[i]);
size_t a = FastRange32(kLen, hashes[i]);
size_t a = FastRange32(hashes[i], kLen);
PREFETCH(data_ + a, 0, 3);
byte_offsets[i] = a;
}
@@ -142,7 +142,7 @@ inline void DynamicBloom::MayContain(int num_keys, Slice* keys,
#pragma warning(disable : 4189)
#endif
inline void DynamicBloom::Prefetch(uint32_t h32) {
size_t a = FastRange32(kLen, h32);
size_t a = FastRange32(h32, kLen);
PREFETCH(data_ + a, 0, 3);
}
#if defined(_MSC_VER)
@@ -171,7 +171,7 @@ inline void DynamicBloom::Prefetch(uint32_t h32) {
// because of false positives.)
inline bool DynamicBloom::MayContainHash(uint32_t h32) const {
size_t a = FastRange32(kLen, h32);
size_t a = FastRange32(h32, kLen);
PREFETCH(data_ + a, 0, 3);
return DoubleProbe(h32, a);
}
@@ -195,7 +195,7 @@ inline bool DynamicBloom::DoubleProbe(uint32_t h32, size_t byte_offset) const {
template <typename OrFunc>
inline void DynamicBloom::AddHash(uint32_t h32, const OrFunc& or_func) {
size_t a = FastRange32(kLen, h32);
size_t a = FastRange32(h32, kLen);
PREFETCH(data_ + a, 0, 3);
// Expand/remix with 64-bit golden ratio
uint64_t h = 0x9e3779b97f4a7c13ULL * h32;