rocksdb/util/dynamic_bloom.cc
Commit 7fff38b1fe by Peter Dillinger: clang-format cache/ and util/ directories (#10867)
Summary:
This is purely the result of running `clang-format -i` on the files, except that some files have been excluded for manual intervention in a separate PR.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10867

Test Plan: `make check`, `make check-headers`, `make format`

Reviewed By: jay-zhuang

Differential Revision: D40682086

Pulled By: pdillinger

fbshipit-source-id: 8673d978553ab99b516da7fb63ba0b82523337f8
2022-10-26 12:08:20 -07:00


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "util/dynamic_bloom.h"

#include <algorithm>

#include "memory/allocator.h"
#include "port/port.h"
#include "rocksdb/slice.h"
#include "util/hash.h"
namespace ROCKSDB_NAMESPACE {

namespace {
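// Returns the smallest power of two that is >= x (and >= 1), e.g.
// roundUpToPow2(3) == 4 and roundUpToPow2(5) == 8.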
uint32_t roundUpToPow2(uint32_t x) {
  uint32_t rv = 1;
  while (rv < x) {
    rv <<= 1;
  }
  return rv;
}
} // namespace

DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits,
                           uint32_t num_probes, size_t huge_page_tlb_size,
                           Logger* logger)
    // Round down, except round up with 1
    : kNumDoubleProbes((num_probes + (num_probes == 1)) / 2) {
  assert(num_probes % 2 == 0);  // limitation of current implementation
  assert(num_probes <= 10);     // limitation of current implementation
  assert(kNumDoubleProbes > 0);

  // Determine how much to round off + align by so that x ^ i (that's xor) is
  // a valid u64 index if x is a valid u64 index and 0 <= i < kNumDoubleProbes.
  uint32_t block_bytes = /*bytes/u64*/ 8 *
                         /*u64s*/ std::max(1U, roundUpToPow2(kNumDoubleProbes));
  uint32_t block_bits = block_bytes * 8;
  uint32_t blocks = (total_bits + block_bits - 1) / block_bits;
  uint32_t sz = blocks * block_bytes;
  kLen = sz / /*bytes/u64*/ 8;
  assert(kLen > 0);
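  // Worked example (illustrative): with num_probes == 6, kNumDoubleProbes is
  // 3 and roundUpToPow2(3) is 4, so block_bytes == 32 and block_bits == 256.
  // A request for total_bits == 1000 then yields blocks == 4, sz == 128 bytes,
  // and kLen == 16 u64 words (1024 bits).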
#ifndef NDEBUG
  for (uint32_t i = 0; i < kNumDoubleProbes; ++i) {
    // Ensure probes starting at last word are in range
    assert(((kLen - 1) ^ i) < kLen);
  }
#endif

  // Padding to correct for allocation not originally aligned on block_bytes
  // boundary
  sz += block_bytes - 1;
  assert(allocator);
  char* raw = allocator->AllocateAligned(sz, huge_page_tlb_size, logger);
  memset(raw, 0, sz);
  auto block_offset = reinterpret_cast<uintptr_t>(raw) % block_bytes;
  if (block_offset > 0) {
    // Align on block_bytes boundary
    raw += block_bytes - block_offset;
  }
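  // At this point raw is aligned on a block_bytes boundary and, thanks to the
  // block_bytes - 1 padding above, still provides at least
  // blocks * block_bytes usable (and already zeroed) bytes.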
  static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
                "Expecting zero-space-overhead atomic");
  data_ = reinterpret_cast<std::atomic<uint64_t>*>(raw);
}
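
// Illustrative usage sketch (hypothetical call site; real users, such as the
// memtable prefix bloom, live elsewhere in RocksDB). Arena from
// memory/arena.h is one Allocator implementation:
//
//   Arena arena;
//   DynamicBloom bloom(&arena, /*total_bits=*/4096, /*num_probes=*/6,
//                      /*huge_page_tlb_size=*/0, /*logger=*/nullptr);
//   bloom.Add("some_key");
//   assert(bloom.MayContain("some_key"));
//
// MayContain() may return a false positive, but never a false negative for a
// key that was previously Add()ed.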
} // namespace ROCKSDB_NAMESPACE