Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-27 11:43:49 +00:00)

Commit d20b8cfaa1
Summary:
Use inlined hash functions instead of a function pointer. Make the number of
buckets a power of two and use bitwise AND instead of mod. After these changes,
we get almost a 50% improvement in performance.

Results (time per op, three runs per configuration):

With 120000000 items, utilization is 89.41%, number of hash functions: 2.
  batch size   0: 0.231us (4.3 Mqps), 0.229us (4.4 Mqps), 0.185us (5.4 Mqps)
  batch size  10: 0.108us (9.3 Mqps), 0.100us (10.0 Mqps), 0.103us (9.7 Mqps)
  batch size  25: 0.101us (9.9 Mqps), 0.098us (10.2 Mqps), 0.097us (10.3 Mqps)
  batch size  50: 0.100us (10.0 Mqps), 0.097us (10.3 Mqps), 0.097us (10.3 Mqps)
  batch size 100: 0.102us (9.8 Mqps), 0.098us (10.2 Mqps), 0.115us (8.7 Mqps)

With 100000000 items, utilization is 74.51%, number of hash functions: 2.
  batch size   0: 0.201us (5.0 Mqps), 0.155us (6.5 Mqps), 0.152us (6.6 Mqps)
  batch size  10: 0.089us (11.3 Mqps), 0.084us (11.9 Mqps), 0.086us (11.6 Mqps)
  batch size  25: 0.087us (11.5 Mqps), 0.085us (11.7 Mqps), 0.093us (10.8 Mqps)
  batch size  50: 0.094us (10.6 Mqps), 0.094us (10.7 Mqps), 0.093us (10.8 Mqps)
  batch size 100: 0.092us (10.9 Mqps), 0.089us (11.2 Mqps), 0.088us (11.3 Mqps)

With 80000000 items, utilization is 59.60%, number of hash functions: 2.
  batch size   0: 0.154us (6.5 Mqps), 0.168us (6.0 Mqps), 0.190us (5.3 Mqps)
  batch size  10: 0.081us (12.4 Mqps), 0.077us (13.0 Mqps), 0.083us (12.1 Mqps)
  batch size  25: 0.077us (13.0 Mqps), 0.073us (13.7 Mqps), 0.073us (13.7 Mqps)
  batch size  50: 0.076us (13.1 Mqps), 0.072us (13.8 Mqps), 0.072us (13.8 Mqps)
  batch size 100: 0.077us (13.0 Mqps), 0.074us (13.6 Mqps), 0.073us (13.6 Mqps)

With 70000000 items, utilization is 52.15%, number of hash functions: 2.
  batch size   0: 0.190us (5.3 Mqps), 0.186us (5.4 Mqps), 0.184us (5.4 Mqps)
  batch size  10: 0.079us (12.7 Mqps), 0.070us (14.2 Mqps), 0.072us (14.0 Mqps)
  batch size  25: 0.080us (12.5 Mqps), 0.072us (14.0 Mqps), 0.071us (14.1 Mqps)
  batch size  50: 0.082us (12.1 Mqps), 0.071us (14.1 Mqps), 0.073us (13.6 Mqps)
  batch size 100: 0.080us (12.5 Mqps), 0.077us (13.0 Mqps), 0.078us (12.8 Mqps)

Test Plan:
  make check all
  make valgrind_check
  make asan_check

Reviewers: sdong, ljin

Reviewed By: ljin

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D22539
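The gist of the bucket-count change: when the number of buckets is a power of
two, hash % num_buckets can be computed as hash & (num_buckets - 1), replacing
an integer division on the lookup path with a single AND. A minimal standalone
sketch of the idea (the helper name and values are illustrative, not code from
this patch):

#include <cassert>
#include <cstdint>

// Round n up to the next power of two (illustrative helper).
static uint64_t NextPowerOfTwo(uint64_t n) {
  uint64_t p = 1;
  while (p < n) {
    p <<= 1;
  }
  return p;
}

int main() {
  uint64_t requested_buckets = 120000000;  // not a power of two
  uint64_t num_buckets = NextPowerOfTwo(requested_buckets);  // 2^27 here
  uint64_t mask = num_buckets - 1;         // all low bits set

  uint64_t hash = 0x9e3779b97f4a7c15ull;   // any 64-bit hash value
  // With a power-of-two bucket count, the cheap AND yields the same
  // bucket index as the division-based modulo.
  assert((hash & mask) == (hash % num_buckets));
  return 0;
}

Rounding the bucket count up to a power of two is also why the runs above
report utilization: all four item counts are consistent with a 2^27-bucket
table (for example, 120000000 / 2^27 is the 89.41% figure shown).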
72 lines | 2.3 KiB | C++
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#pragma once
#ifndef ROCKSDB_LITE

#include <string>
#include "rocksdb/table.h"
#include "util/murmurhash.h"

namespace rocksdb {

const uint32_t kCuckooMurmurSeedMultiplier = 816922183;
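// Computes the hash_cnt-th independent hash of user_key by varying the
// murmur seed. table_size_minus_one must be of the form 2^n - 1 so that the
// bitwise AND below is equivalent to a modulo by the table size (see the
// commit summary: power-of-two bucket count, AND instead of mod).
// get_slice_hash is consulted only in debug builds, letting unit tests
// inject deterministic hash values; in release builds the murmur call is
// inlined with no function-pointer indirection.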
static inline uint64_t CuckooHash(
    const Slice& user_key, uint32_t hash_cnt, uint64_t table_size_minus_one,
    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) {
#ifndef NDEBUG
  // This part is used only in unit tests.
  if (get_slice_hash != nullptr) {
    return get_slice_hash(user_key, hash_cnt, table_size_minus_one + 1);
  }
#endif
  return MurmurHash(user_key.data(), user_key.size(),
      kCuckooMurmurSeedMultiplier * hash_cnt) & table_size_minus_one;
}

// Cuckoo Table is designed for applications that require fast point lookups
// but not fast range scans.
//
// Some assumptions:
// - Key length and Value length are fixed.
// - Does not support Snapshot.
// - Does not support Merge operations.
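//
// Constructor parameters:
// - hash_table_ratio: fraction of hash table buckets the builder aims to
//   fill (the utilization reported in the benchmarks above); higher values
//   give a smaller file but make building harder.
// - max_search_depth: maximum number of cuckoo displacements attempted when
//   inserting a key before the builder gives up.
// - cuckoo_block_size: number of adjacent buckets probed as one block to
//   improve CPU cache locality.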
class CuckooTableFactory : public TableFactory {
 public:
  CuckooTableFactory(double hash_table_ratio, uint32_t max_search_depth,
      uint32_t cuckoo_block_size)
    : hash_table_ratio_(hash_table_ratio),
      max_search_depth_(max_search_depth),
      cuckoo_block_size_(cuckoo_block_size) {}
  ~CuckooTableFactory() {}

  const char* Name() const override { return "CuckooTable"; }

  Status NewTableReader(
      const Options& options, const EnvOptions& soptions,
      const InternalKeyComparator& internal_comparator,
      unique_ptr<RandomAccessFile>&& file, uint64_t file_size,
      unique_ptr<TableReader>* table) const override;

  TableBuilder* NewTableBuilder(const Options& options,
      const InternalKeyComparator& icomparator, WritableFile* file,
      CompressionType compression_type) const override;

  // Sanitizes the specified DB Options.
  Status SanitizeDBOptions(DBOptions* db_opts) const override {
    return Status::OK();
  }

  std::string GetPrintableTableOptions() const override;

 private:
  const double hash_table_ratio_;
  const uint32_t max_search_depth_;
  const uint32_t cuckoo_block_size_;
};

}  // namespace rocksdb
#endif  // ROCKSDB_LITE
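For context, a minimal usage sketch showing how a factory like this is
installed. It assumes the NewCuckooTableFactory() helper declared in
rocksdb/table.h accepted these three parameters at this revision
(constructing CuckooTableFactory directly, as declared above, would work the
same way); the parameter values and database path are illustrative only:

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  // Cuckoo tables trade longer build times for fast point lookups.
  options.table_factory.reset(rocksdb::NewCuckooTableFactory(
      0.9 /* hash_table_ratio */, 100 /* max_search_depth */,
      5 /* cuckoo_block_size */));
  // Assumption: the cuckoo table reader of this era serves lookups from
  // mmap-ed files, so mmap reads must be enabled.
  options.allow_mmap_reads = true;
  options.create_if_missing = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/cuckoo_demo", &db);
  if (!s.ok()) {
    return 1;
  }
  db->Put(rocksdb::WriteOptions(), "key1", "value1");
  std::string value;
  db->Get(rocksdb::ReadOptions(), "key1", &value);
  delete db;
  return 0;
}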