mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-26 07:30:54 +00:00
51af7c326c
Summary: MurmurHash becomes expensive when we do millions of Get() calls per second in one thread. Add this option to allow the first hash function to use the identity function as the hash function. It results in a QPS increase from 3.7M/s to ~4.3M/s. I did not observe an improvement in end-to-end RocksDB performance; this may be caused by other bottlenecks that I will address in a separate diff.

Test Plan:
```
[ljin@dev1964 rocksdb] ./cuckoo_table_reader_test --enable_perf --file_dir=/dev/shm --write --identity_as_first_hash=0
==== Test CuckooReaderTest.WhenKeyExists
==== Test CuckooReaderTest.WhenKeyExistsWithUint64Comparator
==== Test CuckooReaderTest.CheckIterator
==== Test CuckooReaderTest.CheckIteratorUint64
==== Test CuckooReaderTest.WhenKeyNotFound
==== Test CuckooReaderTest.TestReadPerformance
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.272us (3.7 Mqps) with batch size of 0, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.138us (7.2 Mqps) with batch size of 10, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.142us (7.1 Mqps) with batch size of 25, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.142us (7.0 Mqps) with batch size of 50, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.144us (6.9 Mqps) with batch size of 100, # of found keys 125829120
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.201us (5.0 Mqps) with batch size of 0, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.121us (8.3 Mqps) with batch size of 10, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.123us (8.1 Mqps) with batch size of 25, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.121us (8.3 Mqps) with batch size of 50, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.112us (8.9 Mqps) with batch size of 100, # of found keys 104857600
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.251us (4.0 Mqps) with batch size of 0, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.107us (9.4 Mqps) with batch size of 10, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.099us (10.1 Mqps) with batch size of 25, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.100us (10.0 Mqps) with batch size of 50, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.116us (8.6 Mqps) with batch size of 100, # of found keys 83886080
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.189us (5.3 Mqps) with batch size of 0, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.095us (10.5 Mqps) with batch size of 10, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.096us (10.4 Mqps) with batch size of 25, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.098us (10.2 Mqps) with batch size of 50, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.105us (9.5 Mqps) with batch size of 100, # of found keys 73400320
[ljin@dev1964 rocksdb] ./cuckoo_table_reader_test --enable_perf --file_dir=/dev/shm --write --identity_as_first_hash=1
==== Test CuckooReaderTest.WhenKeyExists
==== Test CuckooReaderTest.WhenKeyExistsWithUint64Comparator
==== Test CuckooReaderTest.CheckIterator
==== Test CuckooReaderTest.CheckIteratorUint64
==== Test CuckooReaderTest.WhenKeyNotFound
==== Test CuckooReaderTest.TestReadPerformance
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.230us (4.3 Mqps) with batch size of 0, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.086us (11.7 Mqps) with batch size of 10, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.088us (11.3 Mqps) with batch size of 25, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.083us (12.1 Mqps) with batch size of 50, # of found keys 125829120
With 125829120 items, utilization is 93.75%, number of hash functions: 2.
Time taken per op is 0.083us (12.1 Mqps) with batch size of 100, # of found keys 125829120
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.159us (6.3 Mqps) with batch size of 0, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.078us (12.8 Mqps) with batch size of 10, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.080us (12.6 Mqps) with batch size of 25, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.080us (12.5 Mqps) with batch size of 50, # of found keys 104857600
With 104857600 items, utilization is 78.12%, number of hash functions: 2.
Time taken per op is 0.082us (12.2 Mqps) with batch size of 100, # of found keys 104857600
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.154us (6.5 Mqps) with batch size of 0, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.077us (13.0 Mqps) with batch size of 10, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.077us (12.9 Mqps) with batch size of 25, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.078us (12.8 Mqps) with batch size of 50, # of found keys 83886080
With 83886080 items, utilization is 62.50%, number of hash functions: 2.
Time taken per op is 0.079us (12.6 Mqps) with batch size of 100, # of found keys 83886080
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.218us (4.6 Mqps) with batch size of 0, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.083us (12.0 Mqps) with batch size of 10, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.085us (11.7 Mqps) with batch size of 25, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.086us (11.6 Mqps) with batch size of 50, # of found keys 73400320
With 73400320 items, utilization is 54.69%, number of hash functions: 2.
Time taken per op is 0.078us (12.8 Mqps) with batch size of 100, # of found keys 73400320
```

Reviewers: sdong, igor, yhchiang

Reviewed By: igor

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D23451
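For intuition, the optimization skips one hash computation on the hot Get() path: when keys are fixed-size, uniformly distributed integers, the first probe bucket can be derived from the key bytes directly. A minimal sketch of the idea, with hypothetical names (`FirstBucket` is illustrative, not the actual RocksDB function; `std::hash` stands in for MurmurHash):

```cpp
#include <cstdint>
#include <cstring>
#include <functional>
#include <string>

// Sketch: choose the first probe bucket for a cuckoo-table lookup.
// With identity_as_first_hash set, the leading 8 bytes of the key are
// reinterpreted as the hash value, avoiding a MurmurHash call.
inline uint64_t FirstBucket(const std::string& key, uint64_t num_buckets,
                            bool identity_as_first_hash) {
  if (identity_as_first_hash && key.size() >= sizeof(uint64_t)) {
    uint64_t v;
    std::memcpy(&v, key.data(), sizeof(v));  // identity: reuse key bytes
    return v % num_buckets;
  }
  // Fallback: a real hash (std::hash stands in for MurmurHash here).
  return std::hash<std::string>{}(key) % num_buckets;
}
```

This only pays off when key bytes are already well distributed; skewed keys would pile into a few buckets and lengthen cuckoo displacement paths, which is why it is an opt-in flag.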
518 lines
21 KiB
C++
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "table/meta_blocks.h"
#include "table/cuckoo_table_builder.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {
extern const uint64_t kCuckooTableMagicNumber;
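// The tests below control bucket placement deterministically: each user key
// is mapped to an explicit list of candidate buckets in hash_map, and
// GetSliceHash() looks the answer up instead of computing a real hash.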
namespace {
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;

uint64_t GetSliceHash(const Slice& s, uint32_t index,
    uint64_t max_num_buckets) {
  return hash_map[s.ToString()][index];
}
}  // namespace

class CuckooBuilderTest {
 public:
  CuckooBuilderTest() {
    env_ = Env::Default();
    Options options;
    options.allow_mmap_reads = true;
    env_options_ = EnvOptions(options);
  }
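
  // Reads the written file back and verifies the stored table properties
  // and the contents of every bucket against the expected layout.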
  void CheckFileContents(const std::vector<std::string>& keys,
      const std::vector<std::string>& values,
      const std::vector<uint64_t>& expected_locations,
      std::string expected_unused_bucket, uint64_t expected_table_size,
      uint32_t expected_num_hash_func, bool expected_is_last_level,
      uint32_t expected_cuckoo_block_size = 1) {
    // Read the file back.
    unique_ptr<RandomAccessFile> read_file;
    ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_));
    uint64_t read_file_size;
    ASSERT_OK(env_->GetFileSize(fname, &read_file_size));

    // Assert table properties.
    TableProperties* props = nullptr;
    ASSERT_OK(ReadTableProperties(read_file.get(), read_file_size,
        kCuckooTableMagicNumber, env_, nullptr, &props));
    ASSERT_EQ(props->num_entries, keys.size());
    ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
    ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
        (expected_table_size + expected_cuckoo_block_size - 1));
    ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);

    // Check unused bucket.
    std::string unused_key = props->user_collected_properties[
        CuckooTablePropertyNames::kEmptyKey];
    ASSERT_EQ(expected_unused_bucket.substr(0,
        props->fixed_key_len), unused_key);

    uint32_t value_len_found =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kValueLength].data());
    ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
    ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
    const uint64_t table_size =
        *reinterpret_cast<const uint64_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kHashTableSize].data());
    ASSERT_EQ(expected_table_size, table_size);
    const uint32_t num_hash_func_found =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kNumHashFunc].data());
    ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
    const uint32_t cuckoo_block_size =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kCuckooBlockSize].data());
    ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
    const bool is_last_level_found =
        *reinterpret_cast<const bool*>(props->user_collected_properties[
            CuckooTablePropertyNames::kIsLastLevel].data());
    ASSERT_EQ(expected_is_last_level, is_last_level_found);
    delete props;

    // Check the contents of each bucket.
    std::vector<bool> keys_found(keys.size(), false);
    uint32_t bucket_size = expected_unused_bucket.size();
    for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
      Slice read_slice;
      ASSERT_OK(read_file->Read(i * bucket_size, bucket_size,
          &read_slice, nullptr));
      uint32_t key_idx = std::find(expected_locations.begin(),
          expected_locations.end(), i) - expected_locations.begin();
      if (key_idx == keys.size()) {
        // i is not one of the expected locations. Empty bucket.
        ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
      } else {
        keys_found[key_idx] = true;
        ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
      }
    }
    // Check that all keys were found.
    for (auto key_found : keys_found) {
      ASSERT_TRUE(key_found);
    }
  }
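
  // Builds an internal key from user_key. zero_seqno selects sequence
  // number 0, the form used by the last-level tests below, instead of a
  // non-zero one.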
  std::string GetInternalKey(Slice user_key, bool zero_seqno) {
    IterKey ikey;
    ikey.SetInternalKey(user_key, zero_seqno ? 0 : 1000, kTypeValue);
    return ikey.GetKey().ToString();
  }
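
  // Returns the smallest power of two strictly greater than num; used to
  // compute the expected hash table size from key count and kHashTableRatio.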
  uint64_t NextPowOf2(uint64_t num) {
    uint64_t n = 2;
    while (n <= num) {
      n *= 2;
    }
    return n;
  }

  Env* env_;
  EnvOptions env_options_;
  std::string fname;
  const double kHashTableRatio = 0.9;
};
TEST(CuckooBuilderTest, SuccessWithEmptyFile) {
  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/EmptyFile";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      4, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  ASSERT_EQ(0UL, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  CheckFileContents({}, {}, {}, "", 0, 2, false);
}
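// The write-success tests below share one pattern: stage candidate buckets
// in hash_map, add every key, check the builder's size bookkeeping, then
// verify the on-disk layout with CheckFileContents().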
TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  hash_map = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {1, 2, 3, 4}},
      {user_keys[2], {2, 3, 4, 5}},
      {user_keys[3], {3, 4, 5, 6}}
  };
  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/NoCollisionFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false);
}
TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  hash_map = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 4, false);
}
TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  hash_map = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  uint32_t cuckoo_block_size = 2;
  fname = test::TmpDir() + "/WithCollisionFullKey2";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), cuckoo_block_size, false,
      GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
}
TEST(CuckooBuilderTest, WithCollisionPathFullKey) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally insert an element with hash value somewhere in the middle
  // so that it displaces all the elements after that.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
      "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  hash_map = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 2}},
  };
  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false);
}
TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
      "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  hash_map = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {3, 4}},
      {user_keys[3], {4, 5}},
      {user_keys[4], {0, 3}},
  };
  std::vector<uint64_t> expected_locations = {2, 1, 3, 4, 0};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 2, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false, 2);
}
TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  hash_map = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {1, 2, 3, 4}},
      {user_keys[2], {2, 3, 4, 5}},
      {user_keys[3], {3, 4, 5, 6}}
  };
  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/NoCollisionUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, true);
}
TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  hash_map = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 4, true);
}
TEST(CuckooBuilderTest, WithCollisionPathUserKey) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
      "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  hash_map = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 2}},
  };
  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 2, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  uint32_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, true);
}
TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally try inserting an element with hash value somewhere in the middle;
  // it should fail because the number of elements to displace is too high.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
      "key04", "key05"};
  hash_map = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 1}},
  };

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 2, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value"));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(writable_file->Close());
}
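// A cuckoo table cannot hold two entries for the same user key, so Finish()
// is expected to surface a NotSupported status here.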
TEST(CuckooBuilderTest, FailWhenSameKeyInserted) {
  hash_map = {{"repeatedkey", {0, 1, 2, 3}}};
  uint32_t num_hash_fun = 4;
  std::string user_key = "repeatedkey";

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/FailWhenSameKeyInserted";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, GetSliceHash);
  ASSERT_OK(builder.status());

  builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1"));
  ASSERT_EQ(builder.NumEntries(), 1u);
  ASSERT_OK(builder.status());
  builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2"));
  ASSERT_EQ(builder.NumEntries(), 2u);
  ASSERT_OK(builder.status());

  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(writable_file->Close());
}
}  // namespace rocksdb

int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); }