diff --git a/HISTORY.md b/HISTORY.md
index 1b63af6ff1..722967431a 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -14,6 +14,7 @@
 ### New Features
 * DB identity (`db_id`) and DB session identity (`db_session_id`) are added to table properties and stored in SST files. SST files generated from SstFileWriter and Repairer have DB identity “SST Writer” and “DB Repairer”, respectively. Their DB session IDs are generated in the same way as `DB::GetDbSessionId`. The session ID for SstFileWriter (resp., Repairer) resets every time `SstFileWriter::Open` (resp., `Repairer::Run`) is called.
+* Added experimental option BlockBasedTableOptions::optimize_filters_for_memory for reducing allocated memory size of Bloom filters (~10% savings with Jemalloc) while preserving the same general accuracy. To have an effect, the option requires format_version=5 and malloc_usable_size. Enabling this option is forward and backward compatible with existing format_version=5.
 
 ### Bug Fixes
 * Fail recovery and report once hitting a physical log record checksum mismatch, while reading MANIFEST. RocksDB should not continue processing the MANIFEST any further.
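For context (not part of the patch): a minimal sketch of how a user would opt in, using the public APIs touched by this change. The `/tmp/testdb` path and the lack of error handling are illustrative only.

```cpp
#include "rocksdb/db.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/table.h"

int main() {
  ROCKSDB_NAMESPACE::BlockBasedTableOptions bbto;
  bbto.filter_policy.reset(
      ROCKSDB_NAMESPACE::NewBloomFilterPolicy(10 /* bits_per_key */));
  bbto.format_version = 5;  // required for the optimization to take effect
  bbto.optimize_filters_for_memory = true;

  ROCKSDB_NAMESPACE::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(
      ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(bbto));

  ROCKSDB_NAMESPACE::DB* db = nullptr;
  ROCKSDB_NAMESPACE::Status s =
      ROCKSDB_NAMESPACE::DB::Open(options, "/tmp/testdb", &db);
  // ... write data; filters built during flush/compaction use the
  // fragmentation-aware sizes ...
  delete db;
  return s.ok() ? 0 : 1;
}
```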
diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h
index 096c4b742f..8c9c9bfa6e 100644
--- a/db_stress_tool/db_stress_common.h
+++ b/db_stress_tool/db_stress_common.h
@@ -146,6 +146,7 @@ DECLARE_int32(reopen);
 DECLARE_double(bloom_bits);
 DECLARE_bool(use_block_based_filter);
 DECLARE_bool(partition_filters);
+DECLARE_bool(optimize_filters_for_memory);
 DECLARE_int32(index_type);
 DECLARE_string(db);
 DECLARE_string(secondaries_base);
diff --git a/db_stress_tool/db_stress_gflags.cc b/db_stress_tool/db_stress_gflags.cc
index 2545ea4a6a..7a807753b5 100644
--- a/db_stress_tool/db_stress_gflags.cc
+++ b/db_stress_tool/db_stress_gflags.cc
@@ -361,6 +361,11 @@ DEFINE_bool(partition_filters, false,
             "use partitioned filters "
             "for block-based table");
 
+DEFINE_bool(
+    optimize_filters_for_memory,
+    ROCKSDB_NAMESPACE::BlockBasedTableOptions().optimize_filters_for_memory,
+    "Minimize memory footprint of filters");
+
 DEFINE_int32(
     index_type,
     static_cast<int32_t>(
diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc
index e552867767..9c94d15e52 100644
--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -1762,6 +1762,8 @@ void StressTest::Open() {
         static_cast<int32_t>(FLAGS_index_block_restart_interval);
     block_based_options.filter_policy = filter_policy_;
     block_based_options.partition_filters = FLAGS_partition_filters;
+    block_based_options.optimize_filters_for_memory =
+        FLAGS_optimize_filters_for_memory;
     block_based_options.index_type =
         static_cast<BlockBasedTableOptions::IndexType>(FLAGS_index_type);
     options_.table_factory.reset(
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 47bf60e8f0..adec1aa4c6 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -200,6 +200,40 @@ struct BlockBasedTableOptions {
   // incompatible with block-based filters.
   bool partition_filters = false;
 
+  // EXPERIMENTAL Option to generate Bloom filters that minimize memory
+  // internal fragmentation.
+  //
+  // When false, malloc_usable_size is not available, or format_version < 5,
+  // filters are generated without regard to internal fragmentation when
+  // loaded into memory (historical behavior).
+  //
+  // When true (and malloc_usable_size is available and format_version >= 5),
+  // then Bloom filters are generated to "round up" and "round down" their
+  // sizes to minimize internal fragmentation when loaded into memory, assuming
+  // the reading DB has the same memory allocation characteristics as the
+  // generating DB. This option does not break forward or backward
+  // compatibility.
+  //
+  // While individual filters will vary in bits/key and false positive rate
+  // when setting is true, the implementation attempts to maintain a weighted
+  // average FP rate for filters consistent with this option set to false.
+  //
+  // With Jemalloc for example, this setting is expected to save about 10% of
+  // the memory footprint and block cache charge of filters, while increasing
+  // disk usage of filters by about 1-2% due to encoding efficiency losses
+  // with variance in bits/key.
+  //
+  // NOTE: Because some memory counted by block cache might be unmapped pages
+  // within internal fragmentation, this option can increase observed RSS
+  // memory usage. With cache_index_and_filter_blocks=true, this option makes
+  // the block cache better at using the space it is allowed.
+  //
+  // NOTE: Do not set to true if you do not trust malloc_usable_size. With
+  // this option, RocksDB might access an allocated memory object beyond its
+  // original size if malloc_usable_size says it is safe to do so. While this
+  // can be considered bad practice, it should not produce undefined behavior
+  // unless malloc_usable_size is buggy or broken.
+  bool optimize_filters_for_memory = false;
+
   // Use delta encoding to compress keys in blocks.
   // ReadOptions::pin_data requires this option to be disabled.
   //
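To see the fragmentation this option targets, here is a small standalone probe (not part of the patch). It assumes a platform providing `malloc_usable_size` (e.g. glibc or jemalloc on Linux); the exact numbers depend entirely on the allocator's size classes.

```cpp
#include <malloc.h>

#include <cstdio>
#include <cstdlib>

int main() {
  // 1285 = 20 cache-line (64-byte) blocks + 5 metadata bytes, i.e. a
  // filter for ~1000 keys at 10 bits/key under the historical sizing.
  for (size_t requested : {size_t{1285}, size_t{2048}, size_t{5000}}) {
    void* p = std::malloc(requested);
    size_t usable = malloc_usable_size(p);
    std::printf("requested=%zu usable=%zu wasted=%.1f%%\n", requested, usable,
                100.0 * (usable - requested) / requested);
    std::free(p);
  }
  return 0;
}
```

With jemalloc's size classes, 1285 bytes typically lands in a 1536-byte bin, wasting roughly 19% of the allocation; "rounding" filter sizes to allocator-friendly boundaries is what recovers that space.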
diff --git a/options/options_settable_test.cc b/options/options_settable_test.cc
index 26b90ca683..a45a620350 100644
--- a/options/options_settable_test.cc
+++ b/options/options_settable_test.cc
@@ -167,6 +167,7 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
              "block_size_deviation=8;block_restart_interval=4; "
              "metadata_block_size=1024;"
              "partition_filters=false;"
+             "optimize_filters_for_memory=true;"
              "index_block_restart_interval=4;"
              "filter_policy=bloomfilter:4:true;whole_key_filtering=1;"
              "format_version=1;"
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index ad1d20ca9b..18dddaf3c0 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -268,6 +268,10 @@ static std::unordered_map<std::string, OptionTypeInfo>
         {offsetof(struct BlockBasedTableOptions, partition_filters),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
+       {"optimize_filters_for_memory",
+        {offsetof(struct BlockBasedTableOptions, optimize_filters_for_memory),
+         OptionType::kBoolean, OptionVerificationType::kNormal,
+         OptionTypeFlags::kNone, 0}},
        {"filter_policy",
         {offsetof(struct BlockBasedTableOptions, filter_policy),
          OptionType::kUnknown, OptionVerificationType::kByNameAllowFromNull,
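Registering the field in this map also makes it reachable through the options-string machinery; a sketch using the existing `GetBlockBasedTableOptionsFromString` from `rocksdb/convenience.h`:

```cpp
#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

int main() {
  ROCKSDB_NAMESPACE::BlockBasedTableOptions base, parsed;
  ROCKSDB_NAMESPACE::Status s =
      ROCKSDB_NAMESPACE::GetBlockBasedTableOptionsFromString(
          base, "optimize_filters_for_memory=true;format_version=5", &parsed);
  assert(s.ok());
  assert(parsed.optimize_filters_for_memory);
  assert(parsed.format_version == 5);
  return s.ok() ? 0 : 1;
}
```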
diff --git a/table/block_based/filter_policy.cc b/table/block_based/filter_policy.cc
index 5fc63fd88a..07fbf25fb8 100644
--- a/table/block_based/filter_policy.cc
+++ b/table/block_based/filter_policy.cc
@@ -28,9 +28,12 @@ namespace {
 // See description in FastLocalBloomImpl
 class FastLocalBloomBitsBuilder : public BuiltinFilterBitsBuilder {
  public:
-  explicit FastLocalBloomBitsBuilder(const int millibits_per_key)
+  // Non-null aggregate_rounding_balance implies optimize_filters_for_memory
+  explicit FastLocalBloomBitsBuilder(
+      const int millibits_per_key,
+      std::atomic<int64_t>* aggregate_rounding_balance)
       : millibits_per_key_(millibits_per_key),
-        num_probes_(FastLocalBloomImpl::ChooseNumProbes(millibits_per_key_)) {
+        aggregate_rounding_balance_(aggregate_rounding_balance) {
     assert(millibits_per_key >= 1000);
   }
@@ -48,33 +51,36 @@ class FastLocalBloomBitsBuilder : public BuiltinFilterBitsBuilder {
   }
 
   virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
+    size_t num_entry = hash_entries_.size();
+    std::unique_ptr<char[]> mutable_buf;
     uint32_t len_with_metadata =
-        CalculateSpace(static_cast<uint32_t>(hash_entries_.size()));
-    char* data = new char[len_with_metadata];
-    memset(data, 0, len_with_metadata);
+        CalculateAndAllocate(num_entry, &mutable_buf, /*update_balance*/ true);
 
-    assert(data);
+    assert(mutable_buf);
     assert(len_with_metadata >= 5);
 
+    // Compute num_probes after any rounding / adjustments
+    int num_probes = GetNumProbes(num_entry, len_with_metadata);
+
     uint32_t len = len_with_metadata - 5;
     if (len > 0) {
-      AddAllEntries(data, len);
+      AddAllEntries(mutable_buf.get(), len, num_probes);
     }
 
+    assert(hash_entries_.empty());
+
     // See BloomFilterPolicy::GetBloomBitsReader re: metadata
     // -1 = Marker for newer Bloom implementations
-    data[len] = static_cast<char>(-1);
+    mutable_buf[len] = static_cast<char>(-1);
     // 0 = Marker for this sub-implementation
-    data[len + 1] = static_cast<char>(0);
+    mutable_buf[len + 1] = static_cast<char>(0);
     // num_probes (and 0 in upper bits for 64-byte block size)
-    data[len + 2] = static_cast<char>(num_probes_);
+    mutable_buf[len + 2] = static_cast<char>(num_probes);
     // rest of metadata stays zero
 
-    const char* const_data = data;
-    buf->reset(const_data);
-    assert(hash_entries_.empty());
-
-    return Slice(data, len_with_metadata);
+    Slice rv(mutable_buf.get(), len_with_metadata);
+    *buf = std::move(mutable_buf);
+    return rv;
   }
 
   int CalculateNumEntry(const uint32_t bytes) override {
@@ -84,26 +90,163 @@ class FastLocalBloomBitsBuilder : public BuiltinFilterBitsBuilder {
   }
 
   uint32_t CalculateSpace(const int num_entry) override {
-    uint32_t num_cache_lines = 0;
-    if (millibits_per_key_ > 0 && num_entry > 0) {
-      num_cache_lines = static_cast<uint32_t>(
-          (int64_t{num_entry} * millibits_per_key_ + 511999) / 512000);
-    }
-    return num_cache_lines * 64 + /*metadata*/ 5;
+    // NB: the BuiltinFilterBitsBuilder API presumes len fits in uint32_t.
+    return static_cast<uint32_t>(
+        CalculateAndAllocate(static_cast<size_t>(num_entry),
+                             /* buf */ nullptr,
+                             /*update_balance*/ false));
   }
 
-  double EstimatedFpRate(size_t keys, size_t bytes) override {
-    return FastLocalBloomImpl::EstimatedFpRate(keys, bytes - /*metadata*/ 5,
-                                               num_probes_, /*hash bits*/ 64);
+  // To choose size using malloc_usable_size, we have to actually allocate.
+  uint32_t CalculateAndAllocate(size_t num_entry, std::unique_ptr<char[]>* buf,
+                                bool update_balance) {
+    std::unique_ptr<char[]> tmpbuf;
+
+    // If not for cache line blocks in the filter, what would the target
+    // length in bytes be?
+    size_t raw_target_len = static_cast<size_t>(
+        (uint64_t{num_entry} * millibits_per_key_ + 7999) / 8000);
+
+    if (raw_target_len >= size_t{0xffffffc0}) {
+      // Max supported for this data structure implementation
+      raw_target_len = size_t{0xffffffc0};
+    }
+
+    // Round up to nearest multiple of 64 (block size). This adjustment is
+    // used for target FP rate only so that we don't receive complaints about
+    // lower FP rate vs. historic Bloom filter behavior.
+    uint32_t target_len =
+        static_cast<uint32_t>(raw_target_len + 63) & ~uint32_t{63};
+
+    // Return value set to a default; overwritten in some cases
+    uint32_t rv = target_len + /* metadata */ 5;
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+    if (aggregate_rounding_balance_ != nullptr) {
+      // Do optimize_filters_for_memory, using malloc_usable_size.
+      // Approach: try to keep FP rate balance better than or on
+      // target (negative aggregate_rounding_balance_). We can then select a
+      // lower bound filter size (within reasonable limits) that gets us as
+      // close to on target as possible. We request allocation for that filter
+      // size and use malloc_usable_size to "round up" to the actual
+      // allocation size.
+
+      // Although it can be considered bad practice to use malloc_usable_size
+      // to access an object beyond its original size, this approach should be
+      // quite general, working for all allocators that properly support
+      // malloc_usable_size.
+
+      // Race condition on balance is OK because it can only cause temporary
+      // skew in rounding up vs. rounding down, as long as updates are atomic
+      // and relative.
+      int64_t balance = aggregate_rounding_balance_->load();
+
+      double target_fp_rate = EstimatedFpRate(num_entry, target_len + 5);
+      double rv_fp_rate = target_fp_rate;
+
+      if (balance < 0) {
+        // See formula for BloomFilterPolicy::aggregate_rounding_balance_
+        double for_balance_fp_rate =
+            -balance / double{0x100000000} + target_fp_rate;
+
+        // To simplify, we just try a few modified smaller sizes. This also
+        // caps how much we vary filter size vs. target, to avoid outlier
+        // behavior from excessive variance.
+        for (uint64_t maybe_len64 :
+             {uint64_t{3} * target_len / 4, uint64_t{13} * target_len / 16,
+              uint64_t{7} * target_len / 8, uint64_t{15} * target_len / 16}) {
+          uint32_t maybe_len =
+              static_cast<uint32_t>(maybe_len64) & ~uint32_t{63};
+          double maybe_fp_rate = EstimatedFpRate(num_entry, maybe_len + 5);
+          if (maybe_fp_rate <= for_balance_fp_rate) {
+            rv = maybe_len + /* metadata */ 5;
+            rv_fp_rate = maybe_fp_rate;
+            break;
+          }
+        }
+      }
+
+      // Filter blocks are loaded into block cache with their block trailer.
+      // We need to make sure that's accounted for in choosing a
+      // fragmentation-friendly size.
+      const uint32_t kExtraPadding = kBlockTrailerSize;
+      size_t requested = rv + kExtraPadding;
+
+      // Allocate and get usable size
+      tmpbuf.reset(new char[requested]);
+      size_t usable = malloc_usable_size(tmpbuf.get());
+
+      if (usable - usable / 4 > requested) {
+        // Ratio greater than 4/3 is too much to utilize, if it's
+        // not a buggy or mislinked malloc_usable_size implementation.
+        // Non-linearity of FP rates with bits/key means rapidly
+        // diminishing returns in overall accuracy for additional
+        // storage on disk.
+        // Nothing to do, except assert that the result is accurate about
+        // the usable size. (Assignment never used.)
+        assert(tmpbuf[usable - 1] = 'x');
+      } else if (usable > requested) {
+        // Adjust for reasonably larger usable size
+        size_t usable_len = (usable - kExtraPadding - /* metadata */ 5);
+        if (usable_len >= size_t{0xffffffc0}) {
+          // Max supported for this data structure implementation
+          usable_len = size_t{0xffffffc0};
+        }
+
+        rv = (static_cast<uint32_t>(usable_len) & ~uint32_t{63}) +
+             /* metadata */ 5;
+        rv_fp_rate = EstimatedFpRate(num_entry, rv);
+      } else {
+        // Too small means bad malloc_usable_size
+        assert(usable == requested);
+      }
+      memset(tmpbuf.get(), 0, rv);
+
+      if (update_balance) {
+        int64_t diff = static_cast<int64_t>((rv_fp_rate - target_fp_rate) *
+                                            double{0x100000000});
+        *aggregate_rounding_balance_ += diff;
+      }
+    }
+#else
+    (void)update_balance;
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
+    if (buf) {
+      if (tmpbuf) {
+        *buf = std::move(tmpbuf);
+      } else {
+        buf->reset(new char[rv]());
+      }
+    }
+    return rv;
+  }
+
+  double EstimatedFpRate(size_t keys, size_t len_with_metadata) override {
+    int num_probes = GetNumProbes(keys, len_with_metadata);
+    return FastLocalBloomImpl::EstimatedFpRate(
+        keys, len_with_metadata - /*metadata*/ 5, num_probes, /*hash bits*/ 64);
   }
 
  private:
-  void AddAllEntries(char* data, uint32_t len) {
+  // Compute num_probes after any rounding / adjustments
+  int GetNumProbes(size_t keys, size_t len_with_metadata) {
+    uint64_t millibits = uint64_t{len_with_metadata - 5} * 8000;
+    int actual_millibits_per_key =
+        static_cast<int>(millibits / std::max(keys, size_t{1}));
+    // BEGIN XXX/TODO(peterd): preserving old/default behavior for now to
+    // minimize unit test churn. Remove this some time.
+    if (!aggregate_rounding_balance_) {
+      actual_millibits_per_key = millibits_per_key_;
+    }
+    // END XXX/TODO
+    return FastLocalBloomImpl::ChooseNumProbes(actual_millibits_per_key);
+  }
+
+  void AddAllEntries(char* data, uint32_t len, int num_probes) {
     // Simple version without prefetching:
     //
     // for (auto h : hash_entries_) {
     //   FastLocalBloomImpl::AddHash(Lower32of64(h), Upper32of64(h), len,
-    //                               num_probes_, data);
+    //                               num_probes, data);
     // }
 
     const size_t num_entries = hash_entries_.size();
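For readers skimming the hunk above, the "round down" candidate walk can be summarized in a standalone sketch. Names here are hypothetical, and the standard Bloom approximation stands in for `FastLocalBloomImpl::EstimatedFpRate`:

```cpp
#include <cmath>
#include <cstdint>

// Stand-in for FastLocalBloomImpl::EstimatedFpRate (standard Bloom math);
// assumes keys > 0.
double EstimateFpRate(size_t keys, size_t len_bytes) {
  double bits_per_key = 8.0 * len_bytes / keys;
  int num_probes = static_cast<int>(bits_per_key * std::log(2.0) + 0.5);
  return std::pow(1.0 - std::exp(-num_probes / bits_per_key), num_probes);
}

// Mirrors the selection above: spend accumulated accuracy surplus
// (balance < 0) on a smaller, 64-byte-aligned filter.
uint32_t ChooseLen(size_t keys, uint32_t target_len, int64_t balance) {
  if (balance >= 0) {
    return target_len;  // no surplus to spend; keep the target size
  }
  double target_fp = EstimateFpRate(keys, target_len);
  double allowed_fp = -balance / double{0x100000000} + target_fp;
  // Candidates: 3/4, 13/16, 7/8, 15/16 of target, as in the patch.
  for (uint64_t frac : {uint64_t{12}, uint64_t{13}, uint64_t{14},
                        uint64_t{15}}) {
    uint32_t maybe =
        static_cast<uint32_t>(frac * target_len / 16) & ~uint32_t{63};
    if (maybe > 0 && EstimateFpRate(keys, maybe) <= allowed_fp) {
      return maybe;  // smallest candidate still within the FP allowance
    }
  }
  return target_len;
}
```

In the real code, whichever size is chosen is then "rounded up" to whatever `malloc_usable_size` reports for the actual allocation.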
@@ -129,7 +272,7 @@ class FastLocalBloomBitsBuilder : public BuiltinFilterBitsBuilder {
       uint32_t& hash_ref = hashes[i & kBufferMask];
       uint32_t& byte_offset_ref = byte_offsets[i & kBufferMask];
       // Process (add)
-      FastLocalBloomImpl::AddHashPrepared(hash_ref, num_probes_,
+      FastLocalBloomImpl::AddHashPrepared(hash_ref, num_probes,
                                           data + byte_offset_ref);
       // And buffer
       uint64_t h = hash_entries_.front();
@@ -141,13 +284,16 @@ class FastLocalBloomBitsBuilder : public BuiltinFilterBitsBuilder {
 
     // Finish processing
     for (i = 0; i <= kBufferMask && i < num_entries; ++i) {
-      FastLocalBloomImpl::AddHashPrepared(hashes[i], num_probes_,
+      FastLocalBloomImpl::AddHashPrepared(hashes[i], num_probes,
                                           data + byte_offsets[i]);
     }
   }
 
+  // Target allocation per added key, in thousandths of a bit.
   int millibits_per_key_;
-  int num_probes_;
+  // See BloomFilterPolicy::aggregate_rounding_balance_. If nullptr,
+  // always "round up" like historic behavior.
+  std::atomic<int64_t>* aggregate_rounding_balance_;
 
   // A deque avoids unnecessary copying of already-saved values
   // and has near-minimal peak memory use.
   std::deque<uint64_t> hash_entries_;
@@ -457,7 +603,7 @@ const std::vector<BloomFilterPolicy::Mode> BloomFilterPolicy::kAllUserModes = {
 };
 
 BloomFilterPolicy::BloomFilterPolicy(double bits_per_key, Mode mode)
-    : mode_(mode), warned_(false) {
+    : mode_(mode), warned_(false), aggregate_rounding_balance_(0) {
   // Sanitize bits_per_key
   if (bits_per_key < 1.0) {
     bits_per_key = 1.0;
@@ -549,6 +695,7 @@ FilterBitsBuilder* BloomFilterPolicy::GetFilterBitsBuilder() const {
 FilterBitsBuilder* BloomFilterPolicy::GetBuilderWithContext(
     const FilterBuildingContext& context) const {
   Mode cur = mode_;
+  bool offm = context.table_options.optimize_filters_for_memory;
   // Unusual code construction so that we can have just
   // one exhaustive switch without (risky) recursion
   for (int i = 0; i < 2; ++i) {
@@ -563,7 +710,8 @@ FilterBitsBuilder* BloomFilterPolicy::GetBuilderWithContext(
       case kDeprecatedBlock:
         return nullptr;
       case kFastLocalBloom:
-        return new FastLocalBloomBitsBuilder(millibits_per_key_);
+        return new FastLocalBloomBitsBuilder(
+            millibits_per_key_, offm ? &aggregate_rounding_balance_ : nullptr);
       case kLegacyBloom:
         if (whole_bits_per_key_ >= 14 && context.info_log &&
             !warned_.load(std::memory_order_relaxed)) {
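Why balance in FP-rate units rather than bytes? FP rate is non-linear in bits/key, so a byte-wise average of filter sizes does not yield the target's average accuracy. A quick numeric check using the standard Bloom approximation (a sketch, not the FastLocalBloom estimator):

```cpp
#include <cmath>
#include <cstdio>

// Standard Bloom FP approximation at a given bits/key budget.
double BloomFp(double bits_per_key) {
  int k = static_cast<int>(bits_per_key * std::log(2.0) + 0.5);
  return std::pow(1.0 - std::exp(-k / bits_per_key), k);
}

int main() {
  double lo = BloomFp(7.5), mid = BloomFp(10.0), hi = BloomFp(12.5);
  std::printf("7.5 b/k: %.4f   10 b/k: %.4f   12.5 b/k: %.4f\n", lo, mid, hi);
  // A 50/50 byte-wise mix of 7.5 and 12.5 bits/key stores the same bytes
  // as uniform 10 bits/key, yet its mean FP rate is noticeably worse:
  std::printf("mixed: %.4f vs. uniform: %.4f\n", (lo + hi) / 2.0, mid);
  return 0;
}
```

This is why the policy tracks a running FP-rate surplus/deficit and only rounds a filter down when the surplus covers the accuracy cost.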
diff --git a/table/block_based/filter_policy_internal.h b/table/block_based/filter_policy_internal.h
index 2ca9dc8595..783373b262 100644
--- a/table/block_based/filter_policy_internal.h
+++ b/table/block_based/filter_policy_internal.h
@@ -135,6 +135,16 @@ class BloomFilterPolicy : public FilterPolicy {
   // only report once per BloomFilterPolicy instance, to keep the noise down.)
   mutable std::atomic<bool> warned_;
 
+  // State for implementing optimize_filters_for_memory. Essentially, this
+  // tracks a surplus or deficit in total FP rate of filters generated by
+  // builders under this policy vs. what would have been generated without
+  // optimize_filters_for_memory.
+  //
+  // To avoid floating point weirdness, the actual value is
+  //   Sum over all generated filters f:
+  //     (predicted_fp_rate(f) - predicted_fp_rate(f | o_f_f_m=false)) * 2^32
+  mutable std::atomic<int64_t> aggregate_rounding_balance_;
+
   // For newer Bloom filter implementation(s)
   FilterBitsReader* GetBloomBitsReader(const Slice& contents) const;
 };
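The fixed-point encoding above, isolated (hypothetical helper names): scaling FP-rate deltas by 2^32 lets any number of concurrent builders apply relative updates to one lock-free counter with no floating-point shared state.

```cpp
#include <atomic>
#include <cstdint>

// Policy-owned counter: < 0 means generated filters are, in aggregate,
// more accurate than target (surplus available for rounding down).
std::atomic<int64_t> aggregate_balance{0};

void RecordFilter(double chosen_fp_rate, double target_fp_rate) {
  // Rounding up yields chosen < target (negative delta, builds surplus);
  // rounding down spends it.
  aggregate_balance += static_cast<int64_t>(
      (chosen_fp_rate - target_fp_rate) * double{0x100000000});
}

bool MayRoundDown() {
  return aggregate_balance.load(std::memory_order_relaxed) < 0;
}
```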
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index 2233810980..33386fb798 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -467,6 +467,11 @@ DEFINE_bool(partition_index, false, "Partition index blocks");
 
 DEFINE_bool(index_with_first_key, false, "Include first key in the index");
 
+DEFINE_bool(
+    optimize_filters_for_memory,
+    ROCKSDB_NAMESPACE::BlockBasedTableOptions().optimize_filters_for_memory,
+    "Minimize memory footprint of filters");
+
 DEFINE_int64(
     index_shortening_mode, 2,
     "mode to shorten index: 0 for no shortening; 1 for only shortening "
@@ -3821,6 +3826,8 @@ class Benchmark {
         default:
           fprintf(stderr, "Unknown key shortening mode\n");
       }
+      block_based_options.optimize_filters_for_memory =
+          FLAGS_optimize_filters_for_memory;
       block_based_options.index_shortening = index_shortening;
       if (cache_ == nullptr) {
         block_based_options.no_block_cache = true;
diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py
index e369a6f373..ded7894815 100644
--- a/tools/db_crashtest.py
+++ b/tools/db_crashtest.py
@@ -73,6 +73,7 @@ default_params = {
     "mmap_read": lambda: random.randint(0, 1),
     "nooverwritepercent": 1,
     "open_files": lambda : random.choice([-1, -1, 100, 500000]),
+    "optimize_filters_for_memory": lambda: random.randint(0, 1),
     "partition_filters": lambda: random.randint(0, 1),
     "pause_background_one_in": 1000000,
     "prefixpercent": 5,
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 5dda7def64..0fea9c6625 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -21,6 +21,7 @@ int main() {
 
 #include "logging/logging.h"
 #include "memory/arena.h"
+#include "port/jemalloc_helper.h"
 #include "rocksdb/filter_policy.h"
 #include "table/block_based/filter_policy_internal.h"
 #include "test_util/testharness.h"
@@ -252,8 +253,10 @@ TEST_F(BlockBasedBloomTest, Schema) {
 
 // Different bits-per-byte
 class FullBloomTest : public testing::TestWithParam<BloomFilterPolicy::Mode> {
- private:
+ protected:
   BlockBasedTableOptions table_options_;
+
+ private:
   std::shared_ptr<const FilterPolicy>& policy_;
   std::unique_ptr<FilterBitsBuilder> bits_builder_;
   std::unique_ptr<FilterBitsReader> bits_reader_;
@@ -499,6 +502,77 @@ TEST_P(FullBloomTest, FullVaryingLengths) {
   ASSERT_LE(mediocre_filters, good_filters/5);
 }
 
+TEST_P(FullBloomTest, OptimizeForMemory) {
+  char buffer[sizeof(int)];
+  for (bool offm : {true, false}) {
+    table_options_.optimize_filters_for_memory = offm;
+    ResetPolicy();
+    Random32 rnd(12345);
+    uint64_t total_size = 0;
+    uint64_t total_mem = 0;
+    int64_t total_keys = 0;
+    double total_fp_rate = 0;
+    constexpr int nfilters = 100;
+    for (int i = 0; i < nfilters; ++i) {
+      int nkeys = static_cast<int>(rnd.Uniformish(10000)) + 100;
+      Reset();
+      for (int j = 0; j < nkeys; ++j) {
+        Add(Key(j, buffer));
+      }
+      Build();
+      size_t size = FilterData().size();
+      total_size += size;
+      // optimize_filters_for_memory currently depends on malloc_usable_size
+      // but we run the rest of the test to ensure no bad behavior without it.
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+      size = malloc_usable_size(const_cast<char*>(FilterData().data()));
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
+      total_mem += size;
+      total_keys += nkeys;
+      total_fp_rate += FalsePositiveRate();
+    }
+    EXPECT_LE(total_fp_rate / double{nfilters}, 0.011);
+    EXPECT_GE(total_fp_rate / double{nfilters}, 0.008);
+
+    int64_t ex_min_total_size = int64_t{FLAGS_bits_per_key} * total_keys / 8;
+    EXPECT_GE(static_cast<int64_t>(total_size), ex_min_total_size);
+
+    int64_t blocked_bloom_overhead = nfilters * (CACHE_LINE_SIZE + 5);
+    if (GetParam() == BloomFilterPolicy::kLegacyBloom) {
+      // this config can add extra cache line to make odd number
+      blocked_bloom_overhead += nfilters * CACHE_LINE_SIZE;
+    }
+
+    EXPECT_GE(total_mem, total_size);
+
+    // optimize_filters_for_memory not implemented with legacy Bloom
+    if (offm && GetParam() != BloomFilterPolicy::kLegacyBloom) {
+      // This value can include a small extra penalty for kExtraPadding
+      fprintf(stderr, "Internal fragmentation (optimized): %g%%\n",
+              (total_mem - total_size) * 100.0 / total_size);
+      // Less than 1% internal fragmentation
+      EXPECT_LE(total_mem, total_size * 101 / 100);
+      // Up to 2% storage penalty
+      EXPECT_LE(static_cast<int64_t>(total_size),
+                ex_min_total_size * 102 / 100 + blocked_bloom_overhead);
+    } else {
+      fprintf(stderr, "Internal fragmentation (not optimized): %g%%\n",
+              (total_mem - total_size) * 100.0 / total_size);
+      // TODO: add control checks for more allocators?
+#ifdef ROCKSDB_JEMALLOC
+      fprintf(stderr, "Jemalloc detected? %d\n", HasJemalloc());
+      if (HasJemalloc()) {
+        // More than 5% internal fragmentation
+        EXPECT_GE(total_mem, total_size * 105 / 100);
+      }
+#endif  // ROCKSDB_JEMALLOC
+      // No storage penalty, just usual overhead
+      EXPECT_LE(static_cast<int64_t>(total_size),
+                ex_min_total_size + blocked_bloom_overhead);
+    }
+  }
+}
+
 namespace {
 inline uint32_t SelectByCacheLineSize(uint32_t for64, uint32_t for128,
                                       uint32_t for256) {
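The measurement pattern used by this test (and by filter_bench below) can be expressed as a small helper, sketched here with hypothetical names. It assumes the `ROCKSDB_MALLOC_USABLE_SIZE` semantics from the patch and a non-empty input.

```cpp
#include <cstddef>
#include <utility>
#include <vector>
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>
#endif

// Aggregate internal fragmentation, in percent, over (data, size) pairs
// describing heap-allocated filter buffers.
double FragmentationPct(const std::vector<std::pair<char*, size_t>>& filters) {
  size_t total_size = 0;
  size_t total_mem = 0;
  for (const auto& f : filters) {
    total_size += f.second;
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    total_mem += malloc_usable_size(f.first);  // what the allocator gave
#else
    total_mem += f.second;  // no introspection available: assume exact fit
#endif
  }
  return (total_mem - total_size) * 100.0 / total_size;
}
```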
diff --git a/util/filter_bench.cc b/util/filter_bench.cc
index 518e3e15f7..ec1ece0fcf 100644
--- a/util/filter_bench.cc
+++ b/util/filter_bench.cc
@@ -88,6 +88,9 @@ DEFINE_bool(net_includes_hashing, false,
             "(if not, dry run will include hashing) "
             "(build times always include hashing)");
 
+DEFINE_bool(optimize_filters_for_memory, false,
+            "Setting for BlockBasedTableOptions::optimize_filters_for_memory");
+
 DEFINE_bool(quick, false, "Run more limited set of tests, fewer queries");
 
 DEFINE_bool(best_case, false, "Run limited tests only for best-case");
@@ -278,6 +281,8 @@ struct FilterBench : public MockBlockBasedTableTester {
       kms_.emplace_back(FLAGS_key_size < 8 ? 8 : FLAGS_key_size);
     }
     ioptions_.info_log = &stderr_logger_;
+    table_options_.optimize_filters_for_memory =
+        FLAGS_optimize_filters_for_memory;
   }
 
   void Go();
@@ -337,6 +342,7 @@ void FilterBench::Go() {
   std::unique_ptr<FilterBitsBuilder> builder;
 
   size_t total_memory_used = 0;
+  size_t total_size = 0;
   size_t total_keys_added = 0;
 #ifdef PREDICT_FP_RATE
   double weighted_predicted_fp_rate = 0.0;
@@ -355,7 +361,7 @@ void FilterBench::Go() {
                            true);
 
   infos_.clear();
-  while ((working_mem_size_mb == 0 || total_memory_used < max_mem) &&
+  while ((working_mem_size_mb == 0 || total_size < max_mem) &&
          total_keys_added < max_total_keys) {
     uint32_t filter_id = random_.Next();
     uint32_t keys_to_add = FLAGS_average_keys_per_filter +
@@ -405,7 +411,11 @@ void FilterBench::Go() {
       info.full_block_reader_.reset(
           new FullFilterBlockReader(table_.get(), std::move(block)));
     }
-    total_memory_used += info.filter_.size();
+    total_size += info.filter_.size();
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+    total_memory_used +=
+        malloc_usable_size(const_cast<char*>(info.filter_.data()));
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
     total_keys_added += keys_to_add;
   }
 
@@ -413,11 +423,17 @@ void FilterBench::Go() {
   double ns = double(elapsed_nanos) / total_keys_added;
   std::cout << "Build avg ns/key: " << ns << std::endl;
   std::cout << "Number of filters: " << infos_.size() << std::endl;
-  std::cout << "Total memory (MB): " << total_memory_used / 1024.0 / 1024.0
-            << std::endl;
+  std::cout << "Total size (MB): " << total_size / 1024.0 / 1024.0 << std::endl;
+  if (total_memory_used > 0) {
+    std::cout << "Reported total allocated memory (MB): "
+              << total_memory_used / 1024.0 / 1024.0 << std::endl;
+    std::cout << "Reported internal fragmentation: "
+              << (total_memory_used - total_size) * 100.0 / total_size << "%"
+              << std::endl;
+  }
 
-  double bpk = total_memory_used * 8.0 / total_keys_added;
-  std::cout << "Bits/key actual: " << bpk << std::endl;
+  double bpk = total_size * 8.0 / total_keys_added;
+  std::cout << "Bits/key stored: " << bpk << std::endl;
 #ifdef PREDICT_FP_RATE
   std::cout << "Predicted FP rate %: "
             << 100.0 * (weighted_predicted_fp_rate / total_keys_added)