Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-25 22:44:05 +00:00)

Commit efd035164b
Summary: Especially after updating to C++17, I don't see a compelling case for *requiring* any folly components in RocksDB. I was able to purge the existing hard dependencies, and it can be quite difficult to strip out non-trivial components from folly for use in RocksDB. (The prospect of doing that on F14 has changed my mind on the best approach here.) But this change creates an optional integration where we can plug in components from folly at compile time, starting here with F14FastMap to replace std::unordered_map when possible (probably no public APIs for example). I have replaced the biggest CPU users of std::unordered_map with compile-time pluggable UnorderedMap which will use F14FastMap when USE_FOLLY is set. USE_FOLLY is always set in the Meta-internal buck build, and a simulation of that is in the Makefile for public CI testing. A full folly build is not needed, but checking out the full folly repo is much simpler for getting the dependency, and anything else we might want to optionally integrate in the future.

Some picky details:

* I don't think the distributed mutex stuff is actually used, so it was easy to remove.
* I implemented an alternative to `folly::constexpr_log2` (which is much easier in C++17 than C++11) so that I could pull out the hard dependencies on `ConstexprMath.h`.
* I had to add noexcept move constructors/operators to some types to make F14's complainUnlessNothrowMoveAndDestroy check happy, and I added a macro to make that easier in some common cases.
* Updated Meta-internal buck build to use folly F14Map (always).

No updates to HISTORY.md nor INSTALL.md as this is not (yet?) considered a production integration for open source users.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9546

Test Plan: CircleCI tests updated so that a couple of them use folly. Most internal unit & stress/crash tests updated to use Meta-internal latest folly. (Note: they should probably use buck but they currently use Makefile.)

Example performance improvement: when filter partitions are pinned in cache, they are tracked by PartitionedFilterBlockReader::filter_map_ and we can build a test that exercises that heavily. Build DB with

```
TEST_TMPDIR=/dev/shm/rocksdb ./db_bench -benchmarks=fillrandom -num=10000000 -disable_wal=1 -write_buffer_size=30000000 -bloom_bits=16 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -partition_index_and_filters
```

and test with (simultaneous runs with & without folly, ~20 times each to see convergence)

```
TEST_TMPDIR=/dev/shm/rocksdb ./db_bench_folly -readonly -use_existing_db -benchmarks=readrandom -num=10000000 -bloom_bits=16 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -partition_index_and_filters -duration=40 -pin_l0_filter_and_index_blocks_in_cache
```

Average ops/s no folly: 26229.2
Average ops/s with folly: 26853.3 (+2.4%)

Reviewed By: ajkr

Differential Revision: D34181736

Pulled By: pdillinger

fbshipit-source-id: ffa6ad5104c2880321d8a1aa7187e00ab0d02e94
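As a rough illustration of the compile-time pluggable map described above, here is a minimal sketch of what such a switch can look like. The alias name `UnorderedMap` follows the summary's wording; the folly header path and the exact location of this alias inside the RocksDB tree are assumptions for illustration, not necessarily what the PR ships.

```
// Sketch only: a compile-time pluggable map alias. When USE_FOLLY is
// defined at build time, the alias resolves to folly::F14FastMap;
// otherwise it falls back to std::unordered_map.
#ifdef USE_FOLLY
#include <folly/container/F14Map.h>
#else
#include <unordered_map>
#endif

namespace ROCKSDB_NAMESPACE {

#ifdef USE_FOLLY
template <typename K, typename V>
using UnorderedMap = folly::F14FastMap<K, V>;
#else
template <typename K, typename V>
using UnorderedMap = std::unordered_map<K, V>;
#endif

}  // namespace ROCKSDB_NAMESPACE
```

With an alias like this, hot call sites such as PartitionedFilterBlockReader::filter_map_ can switch container implementations without source changes, purely by defining USE_FOLLY at build time.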
133 lines | 4.1 KiB | C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once

#include <memory>
#include <string>
#include <vector>

#include "port/port.h"
#include "rocksdb/slice.h"
#include "util/bloom_impl.h"
#include "util/hash.h"
#include "util/math.h"

namespace ROCKSDB_NAMESPACE {
class Slice;
class Allocator;
class Logger;

// A legacy Bloom filter implementation used by the Plain Table db format, for
// schema backward compatibility. Not for use in new filter applications.
class PlainTableBloomV1 {
 public:
  // allocator: allocator used for the bloom filter, so memory usage is tracked
  // total_bits: fixed total number of bits for the bloom filter
  // num_probes: number of hash probes for a single key
  // locality: if positive, optimize for cache line locality; 0 otherwise
  // hash_func: customized hash function
  // huge_page_tlb_size: if > 0, try to allocate bloom bytes from huge page TLB
  //                     within this page size. Huge pages need to be reserved
  //                     for the allocation to succeed, e.g.:
  //                       sysctl -w vm.nr_hugepages=20
  //                     See the Linux doc Documentation/vm/hugetlbpage.txt
  explicit PlainTableBloomV1(uint32_t num_probes = 6);
  void SetTotalBits(Allocator* allocator, uint32_t total_bits,
                    uint32_t locality, size_t huge_page_tlb_size,
                    Logger* logger);

  ~PlainTableBloomV1() {}

  // Assumes single-threaded access to this function.
  void AddHash(uint32_t hash);

  // Multi-threaded access to this function is OK.
  bool MayContainHash(uint32_t hash) const;

  void Prefetch(uint32_t hash);

  uint32_t GetNumBlocks() const { return kNumBlocks; }

  Slice GetRawData() const { return Slice(data_, GetTotalBits() / 8); }

  void SetRawData(char* raw_data, uint32_t total_bits, uint32_t num_blocks = 0);

  uint32_t GetTotalBits() const { return kTotalBits; }

  bool IsInitialized() const { return kNumBlocks > 0 || kTotalBits > 0; }

 private:
  uint32_t kTotalBits;
  uint32_t kNumBlocks;
  const uint32_t kNumProbes;

  char* data_;

  static constexpr int LOG2_CACHE_LINE_SIZE =
      ConstexprFloorLog2(CACHE_LINE_SIZE);
};
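
// Note: ConstexprFloorLog2 above comes from util/math.h and is the C++17
// replacement for folly::constexpr_log2 mentioned in the commit summary.
// A minimal sketch of such a constexpr floor-log2 (illustrative only; the
// actual definition in util/math.h may differ):
//
//   template <typename T>
//   constexpr int ConstexprFloorLog2(T v) {
//     int bits = -1;
//     while (v > 0) {
//       v >>= 1;
//       ++bits;
//     }
//     return bits;  // e.g. ConstexprFloorLog2(64) == 6
//   }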

#if defined(_MSC_VER)
#pragma warning(push)
// local variable is initialized but not referenced
#pragma warning(disable : 4189)
#endif
inline void PlainTableBloomV1::Prefetch(uint32_t h) {
  if (kNumBlocks != 0) {
    uint32_t ignored;
    LegacyLocalityBloomImpl</*ExtraRotates*/ true>::PrepareHashMayMatch(
        h, kNumBlocks, data_, &ignored, LOG2_CACHE_LINE_SIZE);
  }
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif

inline bool PlainTableBloomV1::MayContainHash(uint32_t h) const {
  assert(IsInitialized());
  if (kNumBlocks != 0) {
    return LegacyLocalityBloomImpl<true>::HashMayMatch(
        h, kNumBlocks, kNumProbes, data_, LOG2_CACHE_LINE_SIZE);
  } else {
    return LegacyNoLocalityBloomImpl::HashMayMatch(h, kTotalBits, kNumProbes,
                                                   data_);
  }
}

inline void PlainTableBloomV1::AddHash(uint32_t h) {
  assert(IsInitialized());
  if (kNumBlocks != 0) {
    LegacyLocalityBloomImpl<true>::AddHash(h, kNumBlocks, kNumProbes, data_,
                                           LOG2_CACHE_LINE_SIZE);
  } else {
    LegacyNoLocalityBloomImpl::AddHash(h, kTotalBits, kNumProbes, data_);
  }
}

class BloomBlockBuilder {
 public:
  static const std::string kBloomBlock;

  explicit BloomBlockBuilder(uint32_t num_probes = 6) : bloom_(num_probes) {}

  void SetTotalBits(Allocator* allocator, uint32_t total_bits,
                    uint32_t locality, size_t huge_page_tlb_size,
                    Logger* logger) {
    bloom_.SetTotalBits(allocator, total_bits, locality, huge_page_tlb_size,
                        logger);
  }

  uint32_t GetNumBlocks() const { return bloom_.GetNumBlocks(); }

  void AddKeysHashes(const std::vector<uint32_t>& keys_hashes);

  Slice Finish();

 private:
  PlainTableBloomV1 bloom_;
};
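
// Rough usage sketch, based only on the declarations above (illustrative;
// the variable names and parameter values are hypothetical, and the real
// call sites live in the Plain Table builder code):
//
//   BloomBlockBuilder builder(/*num_probes=*/6);
//   builder.SetTotalBits(allocator, /*total_bits=*/num_keys * 10,
//                        /*locality=*/0, /*huge_page_tlb_size=*/0, logger);
//   builder.AddKeysHashes(keys_hashes);  // std::vector<uint32_t> of key hashes
//   Slice contents = builder.Finish();   // serialized bloom block to write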

}  // namespace ROCKSDB_NAMESPACE