mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-25 14:31:35 +00:00
06e593376c
Summary:

## Context/Summary
Similar to https://github.com/facebook/rocksdb/pull/11288 and https://github.com/facebook/rocksdb/pull/11444, categorizing SST/blob file writes according to the different IO activities gives more insight into those activities. To that end, this PR does the following:
- Tag different write IOs by passing down and converting WriteOptions to IOOptions
- Add a new SST_WRITE_MICROS histogram in WritableFileWriter::Append() and break it down into FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS

Some related code refactoring to make the implementation cleaner:
- Blob stats
  - Replace the high-level write measurement with the low-level WritableFileWriter::Append() measurement for BLOB_DB_BLOB_FILE_WRITE_MICROS, so that FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS covers blob files as well. As a consequence, this introduces some behavioral changes; see HISTORY and the db_bench test plan below for more info.
  - Fix bugs where BLOB_DB_BLOB_FILE_SYNCED/BLOB_DB_BLOB_FILE_BYTES_WRITTEN included files that failed to sync and bytes that failed to write.
- Refactor the WriteOptions constructor for easier construction with io_activity and rate_limiter_priority
- Refactor DBImpl::~DBImpl()/BlobDBImpl::Close() to bypass thread op verification
- Build table
  - TableBuilderOptions now includes ReadOptions/WriteOptions, so BuildTable() does not need to take these two variables
  - Replace the io_priority passed into BuildTable() with TableBuilderOptions::WriteOptions::rate_limiter_priority, and similarly for BlobFileBuilder. This parameter is used for dynamically changing file IO priority for flush; see https://github.com/facebook/rocksdb/pull/9988 for more
- Update ThreadStatus::FLUSH_BYTES_WRITTEN to use io_activity instead of io_priority to track flush IO in the flush job and db open

## Test
### db bench
Flush
```
./db_bench --statistics=1 --benchmarks=fillseq --num=100000 --write_buffer_size=100

rocksdb.sst.write.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.flush.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.compaction.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.db.open.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
```

Compaction, db open
```
Setup: ./db_bench --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

rocksdb.sst.write.micros P50 : 2.675325 P95 : 9.578788 P99 : 18.780000 P100 : 314.000000 COUNT : 638 SUM : 3279
rocksdb.file.write.flush.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.compaction.micros P50 : 2.757353 P95 : 9.610687 P99 : 19.316667 P100 : 314.000000 COUNT : 615 SUM : 3213
rocksdb.file.write.db.open.micros P50 : 2.055556 P95 : 3.925000 P99 : 9.000000 P100 : 9.000000 COUNT : 23 SUM : 66
```

Blob stats, just to make sure they aren't broken by this PR:
```
Integrated Blob DB

Setup: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 7.298246 P95 : 9.771930 P99 : 9.991813 P100 : 16.000000 COUNT : 235 SUM : 1600
rocksdb.blobdb.blob.file.synced COUNT : 1
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 2.000000 P95 : 2.829360 P99 : 2.993779 P100 : 9.000000 COUNT : 707 SUM : 1614
- COUNT is higher and values are smaller as it now includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR 3 Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 1 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842 (stays the same)
```
```
Stacked Blob DB

Run: ./db_bench --use_blob_db=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 12.808042 P95 : 19.674497 P99 : 28.539683 P100 : 51.000000 COUNT : 10000 SUM : 140876
rocksdb.blobdb.blob.file.synced COUNT : 8
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 1.657370 P95 : 2.952175 P99 : 3.877519 P100 : 24.000000 COUNT : 30001 SUM : 67924
- COUNT is higher and values are smaller as it now includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR 3 Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 8 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445 (stays the same)
```

### Rehearsal CI stress test
Triggered 3 full runs of all our CI stress tests.

### Performance
Flush
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=ManualFlush/key_num:524288/per_key_size:256 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark; enable_statistics = true

Pre-pr: avg 507515519.3 ns
497686074,499444327,500862543,501389862,502994471,503744435,504142123,504224056,505724198,506610393,506837742,506955122,507695561,507929036,508307733,508312691,508999120,509963561,510142147,510698091,510743096,510769317,510957074,511053311,511371367,511409911,511432960,511642385,511691964,511730908,

Post-pr: avg 511971266.5 ns, regressed 0.88%
502744835,506502498,507735420,507929724,508313335,509548582,509994942,510107257,510715603,511046955,511352639,511458478,512117521,512317380,512766303,512972652,513059586,513804934,513808980,514059409,514187369,514389494,514447762,514616464,514622882,514641763,514666265,514716377,514990179,515502408,
```

Compaction
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{pre|post}_pr --benchmark_filter=ManualCompaction/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark

Pre-pr: avg 495346098.30 ns
492118301,493203526,494201411,494336607,495269217,495404950,496402598,497012157,497358370,498153846

Post-pr: avg 504528077.20 ns, regressed 1.85%. "ManualCompaction" includes flush, so the isolated regression for compaction should be around 1.85 - 0.88 = 0.97%
502465338,502485945,502541789,502909283,503438601,504143885,506113087,506629423,507160414,507393007
```

Put with WAL (in case passing WriteOptions slows down this path even without collecting SST write stats)
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark

Pre-pr: avg 3848.10 ns
3814,3838,3839,3848,3854,3854,3854,3860,3860,3860

Post-pr: avg 3874.20 ns, regressed 0.68%
3863,3867,3871,3874,3875,3877,3877,3877,3880,3881
```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11910

Reviewed By: ajkr

Differential Revision: D49788060

Pulled By: hx235

fbshipit-source-id: 79e73699cda5be3b66461687e5147c2484fc5eff
631 lines
27 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "table/cuckoo/cuckoo_table_builder.h"

#include <map>
#include <string>
#include <utility>
#include <vector>

#include "file/random_access_file_reader.h"
#include "file/writable_file_writer.h"
#include "rocksdb/db.h"
#include "rocksdb/file_system.h"
#include "table/meta_blocks.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"

namespace ROCKSDB_NAMESPACE {
extern const uint64_t kCuckooTableMagicNumber;

namespace {
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;

uint64_t GetSliceHash(const Slice& s, uint32_t index,
                      uint64_t /*max_num_buckets*/) {
  return hash_map[s.ToString()][index];
}
}  // namespace

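// The tests below pre-seed hash_map so that GetSliceHash returns a fixed
// sequence of candidate buckets for each user key, which makes collisions
// and cuckoo displacement paths fully deterministic.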
class CuckooBuilderTest : public testing::Test {
 public:
  CuckooBuilderTest() {
    env_ = Env::Default();
    Options options;
    options.allow_mmap_reads = true;
    file_options_ = FileOptions(options);
  }

  void CheckFileContents(const std::vector<std::string>& keys,
                         const std::vector<std::string>& values,
                         const std::vector<uint64_t>& expected_locations,
                         std::string expected_unused_bucket,
                         uint64_t expected_table_size,
                         uint32_t expected_num_hash_func,
                         bool expected_is_last_level,
                         uint32_t expected_cuckoo_block_size = 1) {
    uint64_t num_deletions = 0;
    for (const auto& key : keys) {
      ParsedInternalKey parsed;
      Status pik_status =
          ParseInternalKey(key, &parsed, true /* log_err_key */);
      if (pik_status.ok() && parsed.type == kTypeDeletion) {
        num_deletions++;
      }
    }
    // Read file
    uint64_t read_file_size;
    ASSERT_OK(env_->GetFileSize(fname, &read_file_size));
    std::unique_ptr<RandomAccessFileReader> file_reader;
    ASSERT_OK(RandomAccessFileReader::Create(
        env_->GetFileSystem(), fname, file_options_, &file_reader, nullptr));

    Options options;
    options.allow_mmap_reads = true;
    ImmutableOptions ioptions(options);

    // Assert Table Properties.
    std::unique_ptr<TableProperties> props;
    const ReadOptions read_options;
    ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size,
                                  kCuckooTableMagicNumber, ioptions,
                                  read_options, &props));
    // Check unused bucket.
    std::string unused_key =
        props->user_collected_properties[CuckooTablePropertyNames::kEmptyKey];
    ASSERT_EQ(expected_unused_bucket.substr(0, props->fixed_key_len),
              unused_key);

    uint64_t value_len_found = *reinterpret_cast<const uint64_t*>(
        props->user_collected_properties[CuckooTablePropertyNames::kValueLength]
            .data());
    ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
    ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
    const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
        props
            ->user_collected_properties
                [CuckooTablePropertyNames::kHashTableSize]
            .data());
    ASSERT_EQ(expected_table_size, table_size);
    const uint32_t num_hash_func_found = *reinterpret_cast<const uint32_t*>(
        props->user_collected_properties[CuckooTablePropertyNames::kNumHashFunc]
            .data());
    ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
    const uint32_t cuckoo_block_size = *reinterpret_cast<const uint32_t*>(
        props
            ->user_collected_properties
                [CuckooTablePropertyNames::kCuckooBlockSize]
            .data());
    ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
    const bool is_last_level_found = *reinterpret_cast<const bool*>(
        props->user_collected_properties[CuckooTablePropertyNames::kIsLastLevel]
            .data());
    ASSERT_EQ(expected_is_last_level, is_last_level_found);

    ASSERT_EQ(props->num_entries, keys.size());
    ASSERT_EQ(props->num_deletions, num_deletions);
    ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
    ASSERT_EQ(props->data_size,
              expected_unused_bucket.size() *
                  (expected_table_size + expected_cuckoo_block_size - 1));
    ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);
    ASSERT_EQ(props->column_family_id, 0);
    ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);

    // Check contents of the bucket.
    std::vector<bool> keys_found(keys.size(), false);
    size_t bucket_size = expected_unused_bucket.size();
    for (uint32_t i = 0; i + 1 < table_size + cuckoo_block_size; ++i) {
      Slice read_slice;
      ASSERT_OK(file_reader->Read(IOOptions(), i * bucket_size, bucket_size,
                                  &read_slice, nullptr, nullptr));
      size_t key_idx =
          std::find(expected_locations.begin(), expected_locations.end(), i) -
          expected_locations.begin();
      if (key_idx == keys.size()) {
        // i is not one of the expected locations. Empty bucket.
        if (read_slice.data() == nullptr) {
          ASSERT_EQ(0, expected_unused_bucket.size());
        } else {
          ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
        }
      } else {
        keys_found[key_idx] = true;
        ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
      }
    }
    for (auto key_found : keys_found) {
      // Check that all keys were found.
      ASSERT_TRUE(key_found);
    }
  }

  std::string GetInternalKey(Slice user_key, bool zero_seqno,
                             ValueType type = kTypeValue) {
    IterKey ikey;
    ikey.SetInternalKey(user_key, zero_seqno ? 0 : 1000, type);
    return ikey.GetInternalKey().ToString();
  }

  uint64_t NextPowOf2(uint64_t num) {
    uint64_t n = 2;
    while (n <= num) {
      n *= 2;
    }
    return n;
  }

  uint64_t GetExpectedTableSize(uint64_t num) {
    return NextPowOf2(static_cast<uint64_t>(num / kHashTableRatio));
  }

  Env* env_;
  FileOptions file_options_;
  std::string fname;
  const double kHashTableRatio = 0.9;
};

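// Each test case follows the same flow: seed hash_map, build a table file
// with CuckooTableBuilder, then let CheckFileContents verify the table
// properties and the exact bucket layout against expected_locations.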
TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) {
  std::unique_ptr<WritableFile> writable_file;
  fname = test::PerThreadDBPath("EmptyFile");
  std::unique_ptr<WritableFileWriter> file_writer;
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, 4, 100,
                             BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  ASSERT_EQ(0UL, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  CheckFileContents({}, {}, {}, "", 2, 2, false);
}

TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
  for (auto type : {kTypeValue, kTypeDeletion}) {
    uint32_t num_hash_fun = 4;
    std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
    std::vector<std::string> values;
    if (type == kTypeValue) {
      values = {"v01", "v02", "v03", "v04"};
    } else {
      values = {"", "", "", ""};
    }
    // Need to have a temporary variable here as VS compiler does not currently
    // support operator= with initializer_list as a parameter
    std::unordered_map<std::string, std::vector<uint64_t>> hm = {
        {user_keys[0], {0, 1, 2, 3}},
        {user_keys[1], {1, 2, 3, 4}},
        {user_keys[2], {2, 3, 4, 5}},
        {user_keys[3], {3, 4, 5, 6}}};
    hash_map = std::move(hm);

    std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
    std::vector<std::string> keys;
    for (auto& user_key : user_keys) {
      keys.push_back(GetInternalKey(user_key, false, type));
    }
    uint64_t expected_table_size = GetExpectedTableSize(keys.size());

    fname = test::PerThreadDBPath("NoCollisionFullKey");
    std::unique_ptr<WritableFileWriter> file_writer;
    ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                         file_options_, &file_writer,
                                         nullptr));
    CuckooTableBuilder builder(file_writer.get(), kHashTableRatio,
                               num_hash_fun, 100, BytewiseComparator(), 1,
                               false, false, GetSliceHash,
                               0 /* column_family_id */,
                               kDefaultColumnFamilyName);
    ASSERT_OK(builder.status());
    for (uint32_t i = 0; i < user_keys.size(); i++) {
      builder.Add(Slice(keys[i]), Slice(values[i]));
      ASSERT_EQ(builder.NumEntries(), i + 1);
      ASSERT_OK(builder.status());
    }
    size_t bucket_size = keys[0].size() + values[0].size();
    ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
    ASSERT_OK(builder.Finish());
    ASSERT_OK(file_writer->Close(IOOptions()));
    ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

    std::string expected_unused_bucket = GetInternalKey("key00", true);
    expected_unused_bucket += std::string(values[0].size(), 'a');
    CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
                      expected_table_size, 2, false);
  }
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = GetExpectedTableSize(keys.size());

  fname = test::PerThreadDBPath("WithCollisionFullKey");
  std::unique_ptr<WritableFileWriter> file_writer;
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
                    expected_table_size, 4, false);
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = GetExpectedTableSize(keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  uint32_t cuckoo_block_size = 2;
  fname = test::PerThreadDBPath("WithCollisionFullKey2");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(
      file_writer.get(), kHashTableRatio, num_hash_fun, 100,
      BytewiseComparator(), cuckoo_block_size, false, false, GetSliceHash,
      0 /* column_family_id */, kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
                    expected_table_size, 3, false, cuckoo_block_size);
}

TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally insert an element with hash value somewhere in the middle
  // so that it displaces all the elements after that.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
                                        "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = GetExpectedTableSize(keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("WithCollisionPathFullKey");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
                    expected_table_size, 2, false);
}

TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
                                        "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {3, 4}},
      {user_keys[3], {4, 5}}, {user_keys[4], {0, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {2, 1, 3, 4, 0};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = GetExpectedTableSize(keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("WithCollisionPathFullKeyAndCuckooBlock");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 2, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
                    expected_table_size, 2, false, 2);
}

TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {1, 2, 3, 4}},
      {user_keys[2], {2, 3, 4, 5}},
      {user_keys[3], {3, 4, 5, 6}}};
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("NoCollisionUserKey");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));

  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
                    expected_unused_bucket, expected_table_size, 2, true);
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("WithCollisionUserKey");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));

  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
                    expected_unused_bucket, expected_table_size, 4, true);
}

TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
                                        "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("WithCollisionPathUserKey");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));

  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             2, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(file_writer->Close(IOOptions()));
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
                    expected_unused_bucket, expected_table_size, 2, true);
}

TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally try inserting an element with hash value somewhere in the middle
  // and it should fail because the no. of elements to displace is too high.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
                                        "key05"};
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}}, {user_keys[4], {0, 1}},
  };
  hash_map = std::move(hm);

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("WithCollisionPathUserKey");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             2, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value"));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(file_writer->Close(IOOptions()));
}

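// Note: duplicate user keys are only rejected at Finish() time; both Add()
// calls in the test below succeed, and Finish() returns NotSupported.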
TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
  // Need to have a temporary variable here as VS compiler does not currently
  // support operator= with initializer_list as a parameter
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {"repeatedkey", {0, 1, 2, 3}}};
  hash_map = std::move(hm);
  uint32_t num_hash_fun = 4;
  std::string user_key = "repeatedkey";

  std::unique_ptr<WritableFileWriter> file_writer;
  fname = test::PerThreadDBPath("FailWhenSameKeyInserted");
  ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), fname,
                                       file_options_, &file_writer, nullptr));
  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
                             100, BytewiseComparator(), 1, false, false,
                             GetSliceHash, 0 /* column_family_id */,
                             kDefaultColumnFamilyName);
  ASSERT_OK(builder.status());

  builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1"));
  ASSERT_EQ(builder.NumEntries(), 1u);
  ASSERT_OK(builder.status());
  builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2"));
  ASSERT_EQ(builder.NumEntries(), 2u);
  ASSERT_OK(builder.status());

  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(file_writer->Close(IOOptions()));
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}