Format files under table/ by clang-format (#10852)

Summary:
Run clang-format on files under the `table` directory.
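
A sweep like this is typically applied with an invocation along the following lines (illustrative only; the exact command used is not recorded in this commit):

  find table -name '*.cc' -o -name '*.h' | xargs clang-format -i  # example invocation, not taken from the commit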

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10852

Reviewed By: ajkr

Differential Revision: D40650732

Pulled By: anand1976

fbshipit-source-id: 2023a958e37fd6274040c5181130284600c9e0ef
Author: anand76, 2022-10-25 11:50:38 -07:00 (committed by Facebook GitHub Bot)
Parent: 7a95938899
Commit: 727bad78b8
57 changed files with 511 additions and 533 deletions

@@ -6,9 +6,9 @@
#ifndef ROCKSDB_LITE
#include "table/adaptive/adaptive_table_factory.h"
#include "table/table_builder.h"
#include "table/format.h"
#include "port/port.h"
#include "table/format.h"
#include "table/table_builder.h"
namespace ROCKSDB_NAMESPACE {
@@ -118,7 +118,8 @@ extern TableFactory* NewAdaptiveTableFactory(
std::shared_ptr<TableFactory> plain_table_factory,
std::shared_ptr<TableFactory> cuckoo_table_factory) {
return new AdaptiveTableFactory(table_factory_to_write,
block_based_table_factory, plain_table_factory, cuckoo_table_factory);
block_based_table_factory,
plain_table_factory, cuckoo_table_factory);
}
} // namespace ROCKSDB_NAMESPACE

@@ -8,6 +8,7 @@
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/options.h"
#include "rocksdb/table.h"

@@ -59,7 +59,6 @@ namespace ROCKSDB_NAMESPACE {
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
namespace {

@@ -474,7 +474,8 @@ void BlockBasedTableFactory::InitializeOptions() {
}
if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
table_options_.index_block_restart_interval != 1) {
// Currently kHashSearch is incompatible with index_block_restart_interval > 1
// Currently kHashSearch is incompatible with
// index_block_restart_interval > 1
table_options_.index_block_restart_interval = 1;
}
if (table_options_.partition_filters &&

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

@@ -104,9 +104,7 @@ extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
BlockBasedTable::~BlockBasedTable() {
delete rep_;
}
BlockBasedTable::~BlockBasedTable() { delete rep_; }
namespace {
// Read the block identified by "handle" from "file".

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/reader_common.h"
// The file contains some member functions of BlockBasedTable that

@@ -34,7 +34,9 @@
#include "table/block_based/block_builder.h"
#include <assert.h>
#include <algorithm>
#include "db/dbformat.h"
#include "rocksdb/comparator.h"
#include "table/block_based/data_block_footer.h"

@@ -8,9 +8,10 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include <stdint.h>
#include <vector>
#include <stdint.h>
#include "rocksdb/slice.h"
#include "rocksdb/table.h"
#include "table/block_based/data_block_hash_index.h"

@@ -10,6 +10,7 @@
#pragma once
#include <cassert>
#include "port/likely.h"
#include "rocksdb/cache.h"
#include "rocksdb/cleanable.h"
@@ -40,18 +41,17 @@ namespace ROCKSDB_NAMESPACE {
template <class T>
class CachableEntry {
public:
public:
CachableEntry() = default;
CachableEntry(T* value, Cache* cache, Cache::Handle* cache_handle,
bool own_value)
: value_(value)
, cache_(cache)
, cache_handle_(cache_handle)
, own_value_(own_value)
{
bool own_value)
: value_(value),
cache_(cache),
cache_handle_(cache_handle),
own_value_(own_value) {
assert(value_ != nullptr ||
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
assert(!!cache_ == !!cache_handle_);
assert(!cache_handle_ || !own_value_);
}
@@ -65,7 +65,7 @@ public:
cache_handle_(rhs.cache_handle_),
own_value_(rhs.own_value_) {
assert(value_ != nullptr ||
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
assert(!!cache_ == !!cache_handle_);
assert(!cache_handle_ || !own_value_);
@@ -85,7 +85,7 @@ public:
own_value_ = rhs.own_value_;
assert(value_ != nullptr ||
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
(cache_ == nullptr && cache_handle_ == nullptr && !own_value_));
assert(!!cache_ == !!cache_handle_);
assert(!cache_handle_ || !own_value_);
@@ -94,13 +94,11 @@ public:
return *this;
}
~CachableEntry() {
ReleaseResource();
}
~CachableEntry() { ReleaseResource(); }
bool IsEmpty() const {
return value_ == nullptr && cache_ == nullptr && cache_handle_ == nullptr &&
!own_value_;
!own_value_;
}
bool IsCached() const {
@@ -193,22 +191,22 @@ public:
return true;
}
private:
void ReleaseResource() noexcept {
if (LIKELY(cache_handle_ != nullptr)) {
assert(cache_ != nullptr);
cache_->Release(cache_handle_);
} else if (own_value_) {
delete value_;
}
}
private:
void ReleaseResource() noexcept {
if (LIKELY(cache_handle_ != nullptr)) {
assert(cache_ != nullptr);
cache_->Release(cache_handle_);
} else if (own_value_) {
delete value_;
}
}
void ResetFields() noexcept {
value_ = nullptr;
cache_ = nullptr;
cache_handle_ = nullptr;
own_value_ = false;
}
void ResetFields() noexcept {
value_ = nullptr;
cache_ = nullptr;
cache_handle_ = nullptr;
own_value_ = false;
}
static void ReleaseCacheHandle(void* arg1, void* arg2) {
Cache* const cache = static_cast<Cache*>(arg1);
@@ -224,7 +222,7 @@ private:
delete static_cast<T*>(arg1);
}
private:
private:
T* value_ = nullptr;
Cache* cache_ = nullptr;
Cache::Handle* cache_handle_ = nullptr;

@@ -2,11 +2,12 @@
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "table/block_based/data_block_hash_index.h"
#include <string>
#include <vector>
#include "rocksdb/slice.h"
#include "table/block_based/data_block_hash_index.h"
#include "util/coding.h"
#include "util/hash.h"

@@ -130,7 +130,7 @@ TEST(DataBlockHashIndex, DataBlockHashTest) {
ASSERT_EQ(buffer.size(), estimated_size);
buffer2 = buffer; // test for the correctness of relative offset
buffer2 = buffer; // test for the correctness of relative offset
Slice s(buffer2);
DataBlockHashIndex index;
@@ -167,7 +167,7 @@ TEST(DataBlockHashIndex, DataBlockHashTestCollision) {
ASSERT_EQ(buffer.size(), estimated_size);
buffer2 = buffer; // test for the correctness of relative offset
buffer2 = buffer; // test for the correctness of relative offset
Slice s(buffer2);
DataBlockHashIndex index;
@@ -208,7 +208,7 @@ TEST(DataBlockHashIndex, DataBlockHashTestLarge) {
ASSERT_EQ(buffer.size(), estimated_size);
buffer2 = buffer; // test for the correctness of relative offset
buffer2 = buffer; // test for the correctness of relative offset
Slice s(buffer2);
DataBlockHashIndex index;

@@ -53,11 +53,11 @@ class FilterBlockBuilder {
virtual ~FilterBlockBuilder() {}
virtual void Add(
const Slice& key_without_ts) = 0; // Add a key to current filter
virtual bool IsEmpty() const = 0; // Empty == none added
const Slice& key_without_ts) = 0; // Add a key to current filter
virtual bool IsEmpty() const = 0; // Empty == none added
// For reporting stats on how many entries the builder considered unique
virtual size_t EstimateEntriesAdded() = 0;
Slice Finish() { // Generate Filter
Slice Finish() { // Generate Filter
const BlockHandle empty_handle;
Status dont_care_status;
auto ret = Finish(empty_handle, &dont_care_status);

@@ -5,6 +5,7 @@
//
#include "table/block_based/filter_block_reader_common.h"
#include "monitoring/perf_context_imp.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/parsed_full_filter_block.h"

@@ -1422,10 +1422,9 @@ FilterBitsBuilder* BloomLikeFilterPolicy::GetFastLocalBloomBuilderWithContext(
CacheReservationManagerImpl<CacheEntryRole::kFilterConstruction>>(
context.table_options.block_cache);
}
return new FastLocalBloomBitsBuilder(
millibits_per_key_, offm ? &aggregate_rounding_balance_ : nullptr,
cache_res_mgr,
context.table_options.detect_filter_construct_corruption);
return new FastLocalBloomBitsBuilder(
millibits_per_key_, offm ? &aggregate_rounding_balance_ : nullptr,
cache_res_mgr, context.table_options.detect_filter_construct_corruption);
}
FilterBitsBuilder* BloomLikeFilterPolicy::GetLegacyBloomBuilderWithContext(
@@ -1788,7 +1787,7 @@ FilterBuildingContext::FilterBuildingContext(
const BlockBasedTableOptions& _table_options)
: table_options(_table_options) {}
FilterPolicy::~FilterPolicy() { }
FilterPolicy::~FilterPolicy() {}
std::shared_ptr<const FilterPolicy> BloomLikeFilterPolicy::Create(
const std::string& name, double bits_per_key) {

@@ -16,7 +16,6 @@
#include "table/block_based/flush_block_policy.h"
#include "table/format.h"
namespace ROCKSDB_NAMESPACE {
// Flush block by size
@@ -27,8 +26,7 @@ class FlushBlockBySizePolicy : public FlushBlockPolicy {
// @params block_size_deviation: This is used to close a block before it
// reaches the configured
FlushBlockBySizePolicy(const uint64_t block_size,
const uint64_t block_size_deviation,
const bool align,
const uint64_t block_size_deviation, const bool align,
const BlockBuilder& data_block_builder)
: block_size_(block_size),
block_size_deviation_limit_(

@@ -121,8 +121,7 @@ Slice FullFilterBlockBuilder::Finish(
FullFilterBlockReader::FullFilterBlockReader(
const BlockBasedTable* t,
CachableEntry<ParsedFullFilterBlock>&& filter_block)
: FilterBlockReaderCommon(t, std::move(filter_block)) {
}
: FilterBlockReaderCommon(t, std::move(filter_block)) {}
bool FullFilterBlockReader::KeyMayMatch(const Slice& key, const bool no_io,
const Slice* const /*const_ikey_ptr*/,

@@ -133,6 +133,7 @@ class FullFilterBlockReader
BlockCacheLookupContext* lookup_context,
Env::IOPriority rate_limiter_priority) override;
size_t ApproximateMemoryUsage() const override;
private:
bool MayMatch(const Slice& entry, bool no_io, GetContext* get_context,
BlockCacheLookupContext* lookup_context,

@@ -80,7 +80,6 @@ class TestFilterBitsReader : public FilterBitsReader {
uint32_t len_;
};
class TestHashFilter : public FilterPolicy {
public:
const char* Name() const override { return "TestHashFilter"; }

@@ -10,8 +10,8 @@
#pragma once
#include <assert.h>
#include <cinttypes>
#include <cinttypes>
#include <list>
#include <string>
#include <unordered_map>

@@ -9,7 +9,6 @@
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/reader_common.h"
namespace ROCKSDB_NAMESPACE {

@@ -86,7 +86,8 @@ class PartitionedFilterBlockTest
int num_keys = sizeof(keys) / sizeof(*keys);
uint64_t max_key_size = 0;
for (int i = 1; i < num_keys; i++) {
max_key_size = std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
max_key_size =
std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
}
uint64_t max_index_size = num_keys * (max_key_size + 8 /*handle*/);
return max_index_size;
@@ -116,11 +117,11 @@ class PartitionedFilterBlockTest
PartitionedIndexBuilder* const p_index_builder,
const SliceTransform* prefix_extractor = nullptr) {
assert(table_options_.block_size_deviation <= 100);
auto partition_size = static_cast<uint32_t>(
((table_options_.metadata_block_size *
(100 - table_options_.block_size_deviation)) +
99) /
100);
auto partition_size =
static_cast<uint32_t>(((table_options_.metadata_block_size *
(100 - table_options_.block_size_deviation)) +
99) /
100);
partition_size = std::max(partition_size, static_cast<uint32_t>(1));
const bool kValueDeltaEncoded = true;
return new PartitionedFilterBlockBuilder(

@@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

@@ -7,6 +7,7 @@
#pragma once
#include <cassert>
#include "table/block_based/cachable_entry.h"
#include "table/format.h"

@@ -147,12 +147,11 @@ inline void BlockFetcher::PrepareBufferForBlockFromFile() {
// file reader that does not implement mmap reads properly.
used_buf_ = &stack_buf_[0];
} else if (maybe_compressed_ && !do_uncompress_) {
compressed_buf_ = AllocateBlock(block_size_with_trailer_,
memory_allocator_compressed_);
compressed_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
used_buf_ = compressed_buf_.get();
} else {
heap_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_);
heap_buf_ = AllocateBlock(block_size_with_trailer_, memory_allocator_);
used_buf_ = heap_buf_.get();
}
}
@@ -187,8 +186,8 @@
inline void BlockFetcher::CopyBufferToCompressedBuf() {
assert(used_buf_ != compressed_buf_.get());
compressed_buf_ = AllocateBlock(block_size_with_trailer_,
memory_allocator_compressed_);
compressed_buf_ =
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
memcpy(compressed_buf_.get(), used_buf_, block_size_with_trailer_);
#ifndef NDEBUG
num_compressed_buf_memcpy_++;

@@ -19,8 +19,8 @@ namespace ROCKSDB_NAMESPACE {
// Retrieves a single block of a given file. Utilizes the prefetch buffer and/or
// persistent cache provided (if any) to try to avoid reading from the file
// directly. Note that both the prefetch buffer and the persistent cache are
// optional; also, note that the persistent cache may be configured to store either
// compressed or uncompressed blocks.
// optional; also, note that the persistent cache may be configured to store
// either compressed or uncompressed blocks.
//
// If the retrieved block is compressed and the do_uncompress flag is set,
// BlockFetcher uncompresses the block (using the uncompression dictionary,

@@ -7,6 +7,7 @@
#include "table/cuckoo/cuckoo_table_builder.h"
#include <assert.h>
#include <algorithm>
#include <limits>
#include <string>
@@ -26,23 +27,23 @@
namespace ROCKSDB_NAMESPACE {
const std::string CuckooTablePropertyNames::kEmptyKey =
"rocksdb.cuckoo.bucket.empty.key";
"rocksdb.cuckoo.bucket.empty.key";
const std::string CuckooTablePropertyNames::kNumHashFunc =
"rocksdb.cuckoo.hash.num";
"rocksdb.cuckoo.hash.num";
const std::string CuckooTablePropertyNames::kHashTableSize =
"rocksdb.cuckoo.hash.size";
"rocksdb.cuckoo.hash.size";
const std::string CuckooTablePropertyNames::kValueLength =
"rocksdb.cuckoo.value.length";
"rocksdb.cuckoo.value.length";
const std::string CuckooTablePropertyNames::kIsLastLevel =
"rocksdb.cuckoo.file.islastlevel";
"rocksdb.cuckoo.file.islastlevel";
const std::string CuckooTablePropertyNames::kCuckooBlockSize =
"rocksdb.cuckoo.hash.cuckooblocksize";
"rocksdb.cuckoo.hash.cuckooblocksize";
const std::string CuckooTablePropertyNames::kIdentityAsFirstHash =
"rocksdb.cuckoo.hash.identityfirst";
"rocksdb.cuckoo.hash.identityfirst";
const std::string CuckooTablePropertyNames::kUseModuleHash =
"rocksdb.cuckoo.hash.usemodule";
"rocksdb.cuckoo.hash.usemodule";
const std::string CuckooTablePropertyNames::kUserKeyLength =
"rocksdb.cuckoo.hash.userkeylength";
"rocksdb.cuckoo.hash.userkeylength";
// Obtained by running echo rocksdb.table.cuckoo | sha1sum
extern const uint64_t kCuckooTableMagicNumber = 0x926789d0c5f17873ull;
@@ -174,9 +175,12 @@ bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const {
Slice CuckooTableBuilder::GetKey(uint64_t idx) const {
assert(closed_);
if (IsDeletedKey(idx)) {
return Slice(&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)], static_cast<size_t>(key_size_));
return Slice(
&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)],
static_cast<size_t>(key_size_));
}
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))], static_cast<size_t>(key_size_));
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))],
static_cast<size_t>(key_size_));
}
Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
@@ -190,11 +194,14 @@ Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
static std::string empty_value(static_cast<unsigned int>(value_size_), 'a');
return Slice(empty_value);
}
return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)], static_cast<size_t>(value_size_));
return Slice(
&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)],
static_cast<size_t>(value_size_));
}
Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
buckets->resize(static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
buckets->resize(
static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
uint32_t make_space_for_key_call_id = 0;
for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
uint64_t bucket_id = 0;
@@ -202,29 +209,33 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
autovector<uint64_t> hash_vals;
Slice user_key = GetUserKey(vector_idx);
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !bucket_found;
++hash_cnt) {
uint64_t hash_val = CuckooHash(user_key, hash_cnt, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
++hash_cnt) {
uint64_t hash_val =
CuckooHash(user_key, hash_cnt, use_module_hash_, hash_table_size_,
identity_as_first_hash_, get_slice_hash_);
// If there is a collision, check next cuckoo_block_size_ locations for
// empty locations. While checking, if we reach end of the hash table,
// stop searching and proceed for next hash function.
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx ==
kMaxVectorIdx) {
bucket_id = hash_val;
bucket_found = true;
break;
} else {
if (ucomp_->Compare(user_key,
GetUserKey((*buckets)[static_cast<size_t>(hash_val)].vector_idx)) == 0) {
if (ucomp_->Compare(
user_key, GetUserKey((*buckets)[static_cast<size_t>(hash_val)]
.vector_idx)) == 0) {
return Status::NotSupported("Same key is being inserted again.");
}
hash_vals.push_back(hash_val);
}
}
}
while (!bucket_found && !MakeSpaceForKey(hash_vals,
++make_space_for_key_call_id, buckets, &bucket_id)) {
while (!bucket_found &&
!MakeSpaceForKey(hash_vals, ++make_space_for_key_call_id, buckets,
&bucket_id)) {
// Rehash by increashing number of hash tables.
if (num_hash_func_ >= max_num_hash_func_) {
return Status::NotSupported("Too many collisions. Unable to hash.");
@@ -232,11 +243,13 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
// We don't really need to rehash the entire table because old hashes are
// still valid and we only increased the number of hash functions.
uint64_t hash_val = CuckooHash(user_key, num_hash_func_, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
hash_table_size_, identity_as_first_hash_,
get_slice_hash_);
++num_hash_func_;
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
++block_idx, ++hash_val) {
if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx ==
kMaxVectorIdx) {
bucket_found = true;
bucket_id = hash_val;
break;
@@ -259,7 +272,7 @@ Status CuckooTableBuilder::Finish() {
// Calculate the real hash size if module hash is enabled.
if (use_module_hash_) {
hash_table_size_ =
static_cast<uint64_t>(num_entries_ / max_hash_table_ratio_);
static_cast<uint64_t>(num_entries_ / max_hash_table_ratio_);
}
status_ = MakeHashTable(&buckets);
if (!status_.ok()) {
@@ -300,9 +313,8 @@ Status CuckooTableBuilder::Finish() {
properties_.num_entries = num_entries_;
properties_.num_deletions = num_entries_ - num_values_;
properties_.fixed_key_len = key_size_;
properties_.user_collected_properties[
CuckooTablePropertyNames::kValueLength].assign(
reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
properties_.user_collected_properties[CuckooTablePropertyNames::kValueLength]
.assign(reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
uint64_t bucket_size = key_size_ + value_size_;
unused_bucket.resize(static_cast<size_t>(bucket_size), 'a');
@@ -332,37 +344,35 @@ Status CuckooTableBuilder::Finish() {
uint64_t offset = buckets.size() * bucket_size;
properties_.data_size = offset;
unused_bucket.resize(static_cast<size_t>(properties_.fixed_key_len));
properties_.user_collected_properties[
CuckooTablePropertyNames::kEmptyKey] = unused_bucket;
properties_.user_collected_properties[
CuckooTablePropertyNames::kNumHashFunc].assign(
reinterpret_cast<char*>(&num_hash_func_), sizeof(num_hash_func_));
properties_.user_collected_properties[CuckooTablePropertyNames::kEmptyKey] =
unused_bucket;
properties_.user_collected_properties[CuckooTablePropertyNames::kNumHashFunc]
.assign(reinterpret_cast<char*>(&num_hash_func_), sizeof(num_hash_func_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kHashTableSize].assign(
reinterpret_cast<const char*>(&hash_table_size_),
sizeof(hash_table_size_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].assign(
reinterpret_cast<const char*>(&is_last_level_file_),
sizeof(is_last_level_file_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kCuckooBlockSize].assign(
reinterpret_cast<const char*>(&cuckoo_block_size_),
sizeof(cuckoo_block_size_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kIdentityAsFirstHash].assign(
reinterpret_cast<const char*>(&identity_as_first_hash_),
sizeof(identity_as_first_hash_));
properties_.user_collected_properties[
CuckooTablePropertyNames::kUseModuleHash].assign(
reinterpret_cast<const char*>(&use_module_hash_),
sizeof(use_module_hash_));
properties_
.user_collected_properties[CuckooTablePropertyNames::kHashTableSize]
.assign(reinterpret_cast<const char*>(&hash_table_size_),
sizeof(hash_table_size_));
properties_.user_collected_properties[CuckooTablePropertyNames::kIsLastLevel]
.assign(reinterpret_cast<const char*>(&is_last_level_file_),
sizeof(is_last_level_file_));
properties_
.user_collected_properties[CuckooTablePropertyNames::kCuckooBlockSize]
.assign(reinterpret_cast<const char*>(&cuckoo_block_size_),
sizeof(cuckoo_block_size_));
properties_
.user_collected_properties[CuckooTablePropertyNames::kIdentityAsFirstHash]
.assign(reinterpret_cast<const char*>(&identity_as_first_hash_),
sizeof(identity_as_first_hash_));
properties_
.user_collected_properties[CuckooTablePropertyNames::kUseModuleHash]
.assign(reinterpret_cast<const char*>(&use_module_hash_),
sizeof(use_module_hash_));
uint32_t user_key_len = static_cast<uint32_t>(smallest_user_key_.size());
properties_.user_collected_properties[
CuckooTablePropertyNames::kUserKeyLength].assign(
reinterpret_cast<const char*>(&user_key_len),
sizeof(user_key_len));
properties_
.user_collected_properties[CuckooTablePropertyNames::kUserKeyLength]
.assign(reinterpret_cast<const char*>(&user_key_len),
sizeof(user_key_len));
// Write meta blocks.
MetaIndexBuilder meta_index_builder;
@@ -406,9 +416,7 @@ void CuckooTableBuilder::Abandon() {
closed_ = true;
}
uint64_t CuckooTableBuilder::NumEntries() const {
return num_entries_;
}
uint64_t CuckooTableBuilder::NumEntries() const { return num_entries_; }
uint64_t CuckooTableBuilder::FileSize() const {
if (closed_) {
@@ -418,8 +426,8 @@ uint64_t CuckooTableBuilder::FileSize() const {
}
if (use_module_hash_) {
return static_cast<uint64_t>((key_size_ + value_size_) *
num_entries_ / max_hash_table_ratio_);
return static_cast<uint64_t>((key_size_ + value_size_) * num_entries_ /
max_hash_table_ratio_);
} else {
// Account for buckets being a power of two.
// As elements are added, file size remains constant for a while and
@@ -468,7 +476,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
// no. of times this will be called is <= max_num_hash_func_ + num_entries_.
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
uint64_t bid = hash_vals[hash_cnt];
(*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id = make_space_for_key_call_id;
(*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id =
make_space_for_key_call_id;
tree.push_back(CuckooNode(bid, 0, 0));
}
bool null_found = false;
@@ -479,24 +488,25 @@ bool CuckooTableBuilder::MakeSpaceForKey(
if (curr_depth >= max_search_depth_) {
break;
}
CuckooBucket& curr_bucket = (*buckets)[static_cast<size_t>(curr_node.bucket_id)];
for (uint32_t hash_cnt = 0;
hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) {
uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx),
hash_cnt, use_module_hash_, hash_table_size_, identity_as_first_hash_,
get_slice_hash_);
CuckooBucket& curr_bucket =
(*buckets)[static_cast<size_t>(curr_node.bucket_id)];
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !null_found;
++hash_cnt) {
uint64_t child_bucket_id = CuckooHash(
GetUserKey(curr_bucket.vector_idx), hash_cnt, use_module_hash_,
hash_table_size_, identity_as_first_hash_, get_slice_hash_);
// Iterate inside Cuckoo Block.
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, ++child_bucket_id) {
if ((*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id ==
make_space_for_key_call_id) {
++block_idx, ++child_bucket_id) {
if ((*buckets)[static_cast<size_t>(child_bucket_id)]
.make_space_for_key_call_id == make_space_for_key_call_id) {
continue;
}
(*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id =
make_space_for_key_call_id;
tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1,
curr_pos));
if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx == kMaxVectorIdx) {
(*buckets)[static_cast<size_t>(child_bucket_id)]
.make_space_for_key_call_id = make_space_for_key_call_id;
tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1, curr_pos));
if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx ==
kMaxVectorIdx) {
null_found = true;
break;
}
@@ -515,7 +525,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
while (bucket_to_replace_pos >= num_hash_func_) {
CuckooNode& curr_node = tree[bucket_to_replace_pos];
(*buckets)[static_cast<size_t>(curr_node.bucket_id)] =
(*buckets)[static_cast<size_t>(tree[curr_node.parent_pos].bucket_id)];
(*buckets)[static_cast<size_t>(tree[curr_node.parent_pos].bucket_id)];
bucket_to_replace_pos = curr_node.parent_pos;
}
*bucket_id = tree[bucket_to_replace_pos].bucket_id;

@@ -6,10 +6,12 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "db/version_edit.h"
#include "port/port.h"
#include "rocksdb/status.h"
@@ -20,7 +22,7 @@
namespace ROCKSDB_NAMESPACE {
class CuckooTableBuilder: public TableBuilder {
class CuckooTableBuilder : public TableBuilder {
public:
CuckooTableBuilder(
WritableFileWriter* file, double max_hash_table_ratio,
@@ -78,8 +80,7 @@ class CuckooTableBuilder: public TableBuilder {
private:
struct CuckooBucket {
CuckooBucket()
: vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
CuckooBucket() : vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
uint32_t vector_idx;
// This number will not exceed kvs_.size() + max_num_hash_func_.
// We assume number of items is <= 2^32.
@@ -125,7 +126,7 @@ class CuckooTableBuilder: public TableBuilder {
bool use_module_hash_;
bool identity_as_first_hash_;
uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index,
uint64_t max_num_buckets);
uint64_t max_num_buckets);
std::string largest_user_key_ = "";
std::string smallest_user_key_ = "";

@@ -42,11 +42,13 @@ class CuckooBuilderTest : public testing::Test {
}
void CheckFileContents(const std::vector<std::string>& keys,
const std::vector<std::string>& values,
const std::vector<uint64_t>& expected_locations,
std::string expected_unused_bucket, uint64_t expected_table_size,
uint32_t expected_num_hash_func, bool expected_is_last_level,
uint32_t expected_cuckoo_block_size = 1) {
const std::vector<std::string>& values,
const std::vector<uint64_t>& expected_locations,
std::string expected_unused_bucket,
uint64_t expected_table_size,
uint32_t expected_num_hash_func,
bool expected_is_last_level,
uint32_t expected_cuckoo_block_size = 1) {
uint64_t num_deletions = 0;
for (const auto& key : keys) {
ParsedInternalKey parsed;
@@ -72,39 +74,44 @@ class CuckooBuilderTest : public testing::Test {
ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size,
kCuckooTableMagicNumber, ioptions, &props));
// Check unused bucket.
std::string unused_key = props->user_collected_properties[
CuckooTablePropertyNames::kEmptyKey];
ASSERT_EQ(expected_unused_bucket.substr(0,
props->fixed_key_len), unused_key);
std::string unused_key =
props->user_collected_properties[CuckooTablePropertyNames::kEmptyKey];
ASSERT_EQ(expected_unused_bucket.substr(0, props->fixed_key_len),
unused_key);
uint64_t value_len_found =
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kValueLength].data());
uint64_t value_len_found = *reinterpret_cast<const uint64_t*>(
props->user_collected_properties[CuckooTablePropertyNames::kValueLength]
.data());
ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
ASSERT_EQ(props->raw_value_size, values.size()*value_len_found);
const uint64_t table_size =
*reinterpret_cast<const uint64_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kHashTableSize].data());
ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
props
->user_collected_properties
[CuckooTablePropertyNames::kHashTableSize]
.data());
ASSERT_EQ(expected_table_size, table_size);
const uint32_t num_hash_func_found =
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kNumHashFunc].data());
const uint32_t num_hash_func_found = *reinterpret_cast<const uint32_t*>(
props->user_collected_properties[CuckooTablePropertyNames::kNumHashFunc]
.data());
ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
const uint32_t cuckoo_block_size =
*reinterpret_cast<const uint32_t*>(props->user_collected_properties[
CuckooTablePropertyNames::kCuckooBlockSize].data());
const uint32_t cuckoo_block_size = *reinterpret_cast<const uint32_t*>(
props
->user_collected_properties
[CuckooTablePropertyNames::kCuckooBlockSize]
.data());
ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
const bool is_last_level_found =
*reinterpret_cast<const bool*>(props->user_collected_properties[
CuckooTablePropertyNames::kIsLastLevel].data());
const bool is_last_level_found = *reinterpret_cast<const bool*>(
props->user_collected_properties[CuckooTablePropertyNames::kIsLastLevel]
.data());
ASSERT_EQ(expected_is_last_level, is_last_level_found);
ASSERT_EQ(props->num_entries, keys.size());
ASSERT_EQ(props->num_deletions, num_deletions);
ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
(expected_table_size + expected_cuckoo_block_size - 1));
ASSERT_EQ(props->raw_key_size, keys.size()*props->fixed_key_len);
ASSERT_EQ(props->data_size,
expected_unused_bucket.size() *
(expected_table_size + expected_cuckoo_block_size - 1));
ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);
ASSERT_EQ(props->column_family_id, 0);
ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);
@@ -156,7 +163,6 @@ class CuckooBuilderTest : public testing::Test {
return NextPowOf2(static_cast<uint64_t>(num / kHashTableRatio));
}
Env* env_;
FileOptions file_options_;
std::string fname;
@@ -276,8 +282,8 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 4, false);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 4, false);
}
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
@@ -324,8 +330,8 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 3, false, cuckoo_block_size);
}
TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
@@ -333,17 +339,14 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
// Finally insert an element with hash value somewhere in the middle
// so that it displaces all the elements after that.
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@@ -376,23 +379,20 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, false);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 2, false);
}
TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {3, 4}},
{user_keys[3], {4, 5}},
{user_keys[4], {0, 3}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {3, 4}},
{user_keys[3], {4, 5}}, {user_keys[4], {0, 3}},
};
hash_map = std::move(hm);
@@ -425,8 +425,8 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
std::string expected_unused_bucket = GetInternalKey("key00", true);
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, false, 2);
CheckFileContents(keys, values, expected_locations, expected_unused_bucket,
expected_table_size, 2, false, 2);
}
TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
@@ -469,7 +469,7 @@ TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
std::string expected_unused_bucket = "key00";
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(user_keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, true);
expected_unused_bucket, expected_table_size, 2, true);
}
TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
@@ -513,22 +513,19 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
std::string expected_unused_bucket = "key00";
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(user_keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 4, true);
expected_unused_bucket, expected_table_size, 4, true);
}
TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 2}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 2}},
};
hash_map = std::move(hm);
@@ -559,7 +556,7 @@ TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
std::string expected_unused_bucket = "key00";
expected_unused_bucket += std::string(values[0].size(), 'a');
CheckFileContents(user_keys, values, expected_locations,
expected_unused_bucket, expected_table_size, 2, true);
expected_unused_bucket, expected_table_size, 2, true);
}
TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
@@ -567,16 +564,13 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
// Finally try inserting an element with hash value somewhere in the middle
// and it should fail because the no. of elements to displace is too high.
uint32_t num_hash_fun = 2;
std::vector<std::string> user_keys = {"key01", "key02", "key03",
"key04", "key05"};
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04",
"key05"};
// Need to have a temporary variable here as VS compiler does not currently
// support operator= with initializer_list as a parameter
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
{user_keys[0], {0, 1}},
{user_keys[1], {1, 2}},
{user_keys[2], {2, 3}},
{user_keys[3], {3, 4}},
{user_keys[4], {0, 1}},
{user_keys[0], {0, 1}}, {user_keys[1], {1, 2}}, {user_keys[2], {2, 3}},
{user_keys[3], {3, 4}}, {user_keys[4], {0, 1}},
};
hash_map = std::move(hm);

@@ -7,9 +7,10 @@
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "util/murmurhash.h"
#include "rocksdb/options.h"
namespace ROCKSDB_NAMESPACE {

@@ -30,7 +30,7 @@ namespace ROCKSDB_NAMESPACE {
namespace {
const uint64_t CACHE_LINE_MASK = ~((uint64_t)CACHE_LINE_SIZE - 1);
const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();
}
} // namespace
extern const uint64_t kCuckooTableMagicNumber;
@@ -87,26 +87,26 @@ CuckooTableReader::CuckooTableReader(
status_ = Status::Corruption("User key length not found");
return;
}
user_key_length_ = *reinterpret_cast<const uint32_t*>(
user_key_len->second.data());
user_key_length_ =
*reinterpret_cast<const uint32_t*>(user_key_len->second.data());
auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength);
if (value_length == user_props.end()) {
status_ = Status::Corruption("Value length not found");
return;
}
value_length_ = *reinterpret_cast<const uint32_t*>(
value_length->second.data());
value_length_ =
*reinterpret_cast<const uint32_t*>(value_length->second.data());
bucket_length_ = key_length_ + value_length_;
auto hash_table_size = user_props.find(
CuckooTablePropertyNames::kHashTableSize);
auto hash_table_size =
user_props.find(CuckooTablePropertyNames::kHashTableSize);
if (hash_table_size == user_props.end()) {
status_ = Status::Corruption("Hash table size not found");
return;
}
table_size_ = *reinterpret_cast<const uint64_t*>(
hash_table_size->second.data());
table_size_ =
*reinterpret_cast<const uint64_t*>(hash_table_size->second.data());
auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel);
if (is_last_level == user_props.end()) {
@@ -115,31 +115,31 @@ CuckooTableReader::CuckooTableReader(
}
is_last_level_ = *reinterpret_cast<const bool*>(is_last_level->second.data());
auto identity_as_first_hash = user_props.find(
CuckooTablePropertyNames::kIdentityAsFirstHash);
auto identity_as_first_hash =
user_props.find(CuckooTablePropertyNames::kIdentityAsFirstHash);
if (identity_as_first_hash == user_props.end()) {
status_ = Status::Corruption("identity as first hash not found");
return;
}
identity_as_first_hash_ = *reinterpret_cast<const bool*>(
identity_as_first_hash->second.data());
identity_as_first_hash_ =
*reinterpret_cast<const bool*>(identity_as_first_hash->second.data());
auto use_module_hash = user_props.find(
CuckooTablePropertyNames::kUseModuleHash);
auto use_module_hash =
user_props.find(CuckooTablePropertyNames::kUseModuleHash);
if (use_module_hash == user_props.end()) {
status_ = Status::Corruption("hash type is not found");
return;
}
use_module_hash_ = *reinterpret_cast<const bool*>(
use_module_hash->second.data());
auto cuckoo_block_size = user_props.find(
CuckooTablePropertyNames::kCuckooBlockSize);
use_module_hash_ =
*reinterpret_cast<const bool*>(use_module_hash->second.data());
auto cuckoo_block_size =
user_props.find(CuckooTablePropertyNames::kCuckooBlockSize);
if (cuckoo_block_size == user_props.end()) {
status_ = Status::Corruption("Cuckoo block size not found");
return;
}
cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
cuckoo_block_size->second.data());
cuckoo_block_size_ =
*reinterpret_cast<const uint32_t*>(cuckoo_block_size->second.data());
cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
// TODO: rate limit reads of whole cuckoo tables.
status_ =
@@ -154,9 +154,10 @@ Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
assert(key.size() == key_length_ + (is_last_level_ ? 8 : 0));
Slice user_key = ExtractUserKey(key);
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
uint64_t offset = bucket_length_ * CuckooHash(
user_key, hash_cnt, use_module_hash_, table_size_,
identity_as_first_hash_, get_slice_hash_);
uint64_t offset =
bucket_length_ * CuckooHash(user_key, hash_cnt, use_module_hash_,
table_size_, identity_as_first_hash_,
get_slice_hash_);
const char* bucket = &file_data_.data()[offset];
for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
++block_idx, bucket += bucket_length_) {
@@ -195,9 +196,10 @@ Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
void CuckooTableReader::Prepare(const Slice& key) {
// Prefetch the first Cuckoo Block.
Slice user_key = ExtractUserKey(key);
uint64_t addr = reinterpret_cast<uint64_t>(file_data_.data()) +
bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
identity_as_first_hash_, nullptr);
uint64_t addr =
reinterpret_cast<uint64_t>(file_data_.data()) +
bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
identity_as_first_hash_, nullptr);
uint64_t end_addr = addr + cuckoo_block_bytes_minus_one_;
for (addr &= CACHE_LINE_MASK; addr < end_addr; addr += CACHE_LINE_SIZE) {
PREFETCH(reinterpret_cast<const char*>(addr), 0, 3);
@@ -228,21 +230,22 @@ class CuckooTableIterator : public InternalIterator {
BucketComparator(const Slice& file_data, const Comparator* ucomp,
uint32_t bucket_len, uint32_t user_key_len,
const Slice& target = Slice())
: file_data_(file_data),
ucomp_(ucomp),
bucket_len_(bucket_len),
user_key_len_(user_key_len),
target_(target) {}
: file_data_(file_data),
ucomp_(ucomp),
bucket_len_(bucket_len),
user_key_len_(user_key_len),
target_(target) {}
bool operator()(const uint32_t first, const uint32_t second) const {
const char* first_bucket =
(first == kInvalidIndex) ? target_.data() :
&file_data_.data()[first * bucket_len_];
const char* first_bucket = (first == kInvalidIndex)
? target_.data()
: &file_data_.data()[first * bucket_len_];
const char* second_bucket =
(second == kInvalidIndex) ? target_.data() :
&file_data_.data()[second * bucket_len_];
(second == kInvalidIndex) ? target_.data()
: &file_data_.data()[second * bucket_len_];
return ucomp_->Compare(Slice(first_bucket, user_key_len_),
Slice(second_bucket, user_key_len_)) < 0;
}
private:
const Slice file_data_;
const Comparator* ucomp_;
@@ -264,11 +267,11 @@
};
CuckooTableIterator::CuckooTableIterator(CuckooTableReader* reader)
: bucket_comparator_(reader->file_data_, reader->ucomp_,
reader->bucket_length_, reader->user_key_length_),
reader_(reader),
initialized_(false),
curr_key_idx_(kInvalidIndex) {
: bucket_comparator_(reader->file_data_, reader->ucomp_,
reader->bucket_length_, reader->user_key_length_),
reader_(reader),
initialized_(false),
curr_key_idx_(kInvalidIndex) {
sorted_bucket_ids_.clear();
curr_value_.clear();
curr_key_.Clear();
@@ -278,7 +281,8 @@ void CuckooTableIterator::InitIfNeeded() {
if (initialized_) {
return;
}
sorted_bucket_ids_.reserve(static_cast<size_t>(reader_->GetTableProperties()->num_entries));
sorted_bucket_ids_.reserve(
static_cast<size_t>(reader_->GetTableProperties()->num_entries));
uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
assert(num_buckets < kInvalidIndex);
const char* bucket = reader_->file_data_.data();
@@ -289,7 +293,7 @@ void CuckooTableIterator::InitIfNeeded() {
bucket += reader_->bucket_length_;
}
assert(sorted_bucket_ids_.size() ==
reader_->GetTableProperties()->num_entries);
reader_->GetTableProperties()->num_entries);
std::sort(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(),
bucket_comparator_);
curr_key_idx_ = kInvalidIndex;
@@ -311,13 +315,11 @@ void CuckooTableIterator::SeekToLast() {
void CuckooTableIterator::Seek(const Slice& target) {
InitIfNeeded();
const BucketComparator seek_comparator(
reader_->file_data_, reader_->ucomp_,
reader_->bucket_length_, reader_->user_key_length_,
ExtractUserKey(target));
auto seek_it = std::lower_bound(sorted_bucket_ids_.begin(),
sorted_bucket_ids_.end(),
kInvalidIndex,
seek_comparator);
reader_->file_data_, reader_->ucomp_, reader_->bucket_length_,
reader_->user_key_length_, ExtractUserKey(target));
auto seek_it =
std::lower_bound(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(),
kInvalidIndex, seek_comparator);
curr_key_idx_ =
static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
PrepareKVAtCurrIdx();
@@ -339,12 +341,12 @@ void CuckooTableIterator::PrepareKVAtCurrIdx() {
return;
}
uint32_t id = sorted_bucket_ids_[curr_key_idx_];
const char* offset = reader_->file_data_.data() +
id * reader_->bucket_length_;
const char* offset =
reader_->file_data_.data() + id * reader_->bucket_length_;
if (reader_->is_last_level_) {
// Always return internal key.
curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_),
0, kTypeValue);
curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_), 0,
kTypeValue);
} else {
curr_key_.SetInternalKey(Slice(offset, reader_->key_length_));
}
@@ -388,8 +390,7 @@ InternalIterator* CuckooTableReader::NewIterator(
const ReadOptions& /*read_options*/,
const SliceTransform* /* prefix_extractor */, Arena* arena,
bool /*skip_filters*/, TableReaderCaller /*caller*/,
size_t /*compaction_readahead_size*/,
bool /* allow_unprepared_value */) {
size_t /*compaction_readahead_size*/, bool /* allow_unprepared_value */) {
if (!status().ok()) {
return NewErrorInternalIterator<Slice>(
Status::Corruption("CuckooTableReader status is not okay."), arena);

@@ -9,8 +9,8 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <memory>
#include <string>
#include <utility>
#include <vector>
@@ -25,7 +25,7 @@ class Arena;
class TableReader;
struct ImmutableOptions;
class CuckooTableReader: public TableReader {
class CuckooTableReader : public TableReader {
public:
CuckooTableReader(const ImmutableOptions& ioptions,
std::unique_ptr<RandomAccessFileReader>&& file,
@@ -93,7 +93,7 @@ class CuckooTableReader: public TableReader {
uint64_t table_size_;
const Comparator* ucomp_;
uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index,
uint64_t max_num_buckets);
uint64_t max_num_buckets);
};
} // namespace ROCKSDB_NAMESPACE

@@ -33,11 +33,12 @@ int main() {
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_string(file_dir, "", "Directory where the files will be created"
" for benchmark. Added for using tmpfs.");
DEFINE_string(file_dir, "",
"Directory where the files will be created"
" for benchmark. Added for using tmpfs.");
DEFINE_bool(enable_perf, false, "Run Benchmark Tests too.");
DEFINE_bool(write, false,
"Should write new values to file in performance tests?");
"Should write new values to file in performance tests?");
DEFINE_bool(identity_as_first_hash, true, "use identity as first hash");
namespace ROCKSDB_NAMESPACE {
@@ -45,10 +46,10 @@ namespace ROCKSDB_NAMESPACE {
namespace {
const uint32_t kNumHashFunc = 10;
// Methods, variables related to Hash functions.
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
std::unordered_map<std::string, std::vector<uint64_t> > hash_map;
void AddHashLookups(const std::string& s, uint64_t bucket_id,
uint32_t num_hash_fun) {
uint32_t num_hash_fun) {
std::vector<uint64_t> v;
for (uint32_t i = 0; i < num_hash_fun; i++) {
v.push_back(bucket_id + i);
@@ -128,8 +129,8 @@ class CuckooReaderTest : public testing::Test {
}
void UpdateKeys(bool with_zero_seqno) {
for (uint32_t i = 0; i < num_items; i++) {
ParsedInternalKey ikey(user_keys[i],
with_zero_seqno ? 0 : i + 1000, kTypeValue);
ParsedInternalKey ikey(user_keys[i], with_zero_seqno ? 0 : i + 1000,
kTypeValue);
keys[i].clear();
AppendInternalKey(&keys[i], ikey);
}
@@ -189,11 +190,11 @@ class CuckooReaderTest : public testing::Test {
TableReaderCaller::kUncategorized);
ASSERT_OK(it->status());
ASSERT_TRUE(!it->Valid());
it->Seek(keys[num_items/2]);
it->Seek(keys[num_items / 2]);
ASSERT_TRUE(it->Valid());
ASSERT_OK(it->status());
ASSERT_TRUE(keys[num_items/2] == it->key());
ASSERT_TRUE(values[num_items/2] == it->value());
ASSERT_TRUE(keys[num_items / 2] == it->key());
ASSERT_TRUE(values[num_items / 2] == it->value());
ASSERT_OK(it->status());
it->~InternalIterator();
}
@@ -273,7 +274,7 @@ TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) {
}
TEST_F(CuckooReaderTest, CheckIterator) {
SetUp(2*kNumHashFunc);
SetUp(2 * kNumHashFunc);
fname = test::PerThreadDBPath("CuckooReader_CheckIterator");
for (uint64_t i = 0; i < num_items; i++) {
user_keys[i] = "key" + NumToStr(i);
@@ -281,7 +282,7 @@ TEST_F(CuckooReaderTest, CheckIterator) {
AppendInternalKey(&keys[i], ikey);
values[i] = "value" + NumToStr(i);
// Give disjoint hash values, in reverse order.
AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
}
CreateCuckooFileAndCheckReader();
CheckIterator();
@@ -292,7 +293,7 @@ TEST_F(CuckooReaderTest, CheckIterator) {
}
TEST_F(CuckooReaderTest, CheckIteratorUint64) {
SetUp(2*kNumHashFunc);
SetUp(2 * kNumHashFunc);
fname = test::PerThreadDBPath("CuckooReader_CheckIterator");
for (uint64_t i = 0; i < num_items; i++) {
user_keys[i].resize(8);
@@ -301,7 +302,7 @@ TEST_F(CuckooReaderTest, CheckIteratorUint64) {
AppendInternalKey(&keys[i], ikey);
values[i] = "value" + NumToStr(i);
// Give disjoint hash values, in reverse order.
AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
}
CreateCuckooFileAndCheckReader(test::Uint64Comparator());
CheckIterator(test::Uint64Comparator());
@@ -366,11 +367,11 @@ TEST_F(CuckooReaderTest, WhenKeyNotFound) {
// Test read when key is unused key.
std::string unused_key =
reader.GetTableProperties()->user_collected_properties.at(
CuckooTablePropertyNames::kEmptyKey);
reader.GetTableProperties()->user_collected_properties.at(
CuckooTablePropertyNames::kEmptyKey);
// Add hash values that map to empty buckets.
AddHashLookups(ExtractUserKey(unused_key).ToString(),
kNumHashFunc, kNumHashFunc);
AddHashLookups(ExtractUserKey(unused_key).ToString(), kNumHashFunc,
kNumHashFunc);
value.Reset();
GetContext get_context3(
ucmp, nullptr, nullptr, nullptr, GetContext::kNotFound, Slice(unused_key),
@@ -407,8 +408,8 @@ std::string GetFileName(uint64_t num) {
// Create last level file as we are interested in measuring performance of
// last level file only.
void WriteFile(const std::vector<std::string>& keys,
const uint64_t num, double hash_ratio) {
void WriteFile(const std::vector<std::string>& keys, const uint64_t num,
double hash_ratio) {
Options options;
options.allow_mmap_reads = true;
const auto& fs = options.env->GetFileSystem();
@@ -478,13 +479,16 @@ void ReadKeys(uint64_t num, uint32_t batch_size) {
test::Uint64Comparator(), nullptr);
ASSERT_OK(reader.status());
const UserCollectedProperties user_props =
reader.GetTableProperties()->user_collected_properties;
reader.GetTableProperties()->user_collected_properties;
const uint32_t num_hash_fun = *reinterpret_cast<const uint32_t*>(
user_props.at(CuckooTablePropertyNames::kNumHashFunc).data());
const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
user_props.at(CuckooTablePropertyNames::kHashTableSize).data());
fprintf(stderr, "With %" PRIu64 " items, utilization is %.2f%%, number of"
" hash functions: %u.\n", num, num * 100.0 / (table_size), num_hash_fun);
fprintf(stderr,
"With %" PRIu64
" items, utilization is %.2f%%, number of"
" hash functions: %u.\n",
num, num * 100.0 / (table_size), num_hash_fun);
ReadOptions r_options;
std::vector<uint64_t> keys;
@@ -502,10 +506,10 @@ void ReadKeys(uint64_t num, uint32_t batch_size) {
uint64_t start_time = env->NowMicros();
if (batch_size > 0) {
for (uint64_t i = 0; i < num; i += batch_size) {
for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
for (uint64_t j = i; j < i + batch_size && j < num; ++j) {
reader.Prepare(Slice(reinterpret_cast<char*>(&keys[j]), 16));
}
for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
for (uint64_t j = i; j < i + batch_size && j < num; ++j) {
reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[j]), 16),
&get_context, nullptr);
}
@@ -518,8 +522,8 @@
}
float time_per_op = (env->NowMicros() - start_time) * 1.0f / num;
fprintf(stderr,
"Time taken per op is %.3fus (%.1f Mqps) with batch size of %u\n",
time_per_op, 1.0 / time_per_op, batch_size);
"Time taken per op is %.3fus (%.1f Mqps) with batch size of %u\n",
time_per_op, 1.0 / time_per_op, batch_size);
}
} // namespace.
@@ -531,10 +535,11 @@ TEST_F(CuckooReaderTest, TestReadPerformance) {
// These numbers are chosen to have a hash utilization % close to
// 0.9, 0.75, 0.6 and 0.5 respectively.
// They all create 128 M buckets.
std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
70*1024*1024};
std::vector<uint64_t> nums = {120 * 1024 * 1024, 100 * 1024 * 1024,
80 * 1024 * 1024, 70 * 1024 * 1024};
#ifndef NDEBUG
fprintf(stdout,
fprintf(
stdout,
"WARNING: Not compiled with DNDEBUG. Performance tests may be slow.\n");
#endif
for (uint64_t num : nums) {

@@ -21,6 +21,7 @@ class MaxIteratorComparator {
bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
return comparator_->Compare(a->key(), b->key()) < 0;
}
private:
const InternalKeyComparator* comparator_;
};
@@ -35,6 +36,7 @@ class MinIteratorComparator {
bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
return comparator_->Compare(a->key(), b->key()) > 0;
}
private:
const InternalKeyComparator* comparator_;
};

@@ -29,7 +29,7 @@ Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
namespace {
class EmptyIterator : public Iterator {
public:
explicit EmptyIterator(const Status& s) : status_(s) { }
explicit EmptyIterator(const Status& s) : status_(s) {}
bool Valid() const override { return false; }
void Seek(const Slice& /*target*/) override {}
void SeekForPrev(const Slice& /*target*/) override {}

@@ -145,9 +145,7 @@ class IteratorWrapperBase {
return iter_->IsValuePinned();
}
bool IsValuePrepared() const {
return result_.value_prepared;
}
bool IsValuePrepared() const { return result_.value_prepared; }
Slice user_key() const {
assert(Valid());

@@ -35,8 +35,7 @@ const std::string kRangeDelBlockName = "rocksdb.range_del";
MetaIndexBuilder::MetaIndexBuilder()
: meta_index_block_(new BlockBuilder(1 /* restart interval */)) {}
void MetaIndexBuilder::Add(const std::string& key,
const BlockHandle& handle) {
void MetaIndexBuilder::Add(const std::string& key, const BlockHandle& handle) {
std::string handle_encoding;
handle.EncodeTo(&handle_encoding);
meta_block_handles_.insert({key, handle_encoding});
@ -173,8 +172,8 @@ void LogPropertiesCollectionError(Logger* info_log, const std::string& method,
assert(method == "Add" || method == "Finish");
std::string msg =
"Encountered error when calling TablePropertiesCollector::" +
method + "() with collector name: " + name;
"Encountered error when calling TablePropertiesCollector::" + method +
"() with collector name: " + name;
ROCKS_LOG_ERROR(info_log, "%s", msg.c_str());
}
@ -346,8 +345,9 @@ Status ReadTablePropertiesHelper(
if (!GetVarint64(&raw_val, &val)) {
// skip malformed value
auto error_msg =
"Detect malformed value in properties meta-block:"
"\tkey: " + key + "\tval: " + raw_val.ToString();
"Detect malformed value in properties meta-block:"
"\tkey: " +
key + "\tval: " + raw_val.ToString();
ROCKS_LOG_ERROR(ioptions.logger, "%s", error_msg.c_str());
continue;
}
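The skip-on-malformed branch above relies on GetVarint64 failing cleanly on truncated input. A minimal sketch of the LEB128-style varint scheme it decodes (7 payload bits per byte, high bit set on every byte but the last); DecodeVarint64 here is an illustrative re-implementation, not the library function:

#include <cstdint>
#include <string>

// Returns false on truncated or overlong input, mirroring how the
// properties reader skips a key when GetVarint64 fails.
bool DecodeVarint64(const std::string& in, size_t* pos, uint64_t* out) {
  uint64_t result = 0;
  for (int shift = 0; shift <= 63 && *pos < in.size(); shift += 7) {
    uint64_t byte = static_cast<unsigned char>(in[(*pos)++]);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *out = result;
      return true;
    }
  }
  return false;  // ran out of bytes or value too long: malformed
}

int main() {
  std::string buf("\x96\x01", 2);  // varint encoding of 150
  size_t pos = 0;
  uint64_t v = 0;
  return (DecodeVarint64(buf, &pos, &v) && v == 150) ? 0 : 1;
}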

View File

@ -123,8 +123,7 @@ class MultiGetContext {
assert(num_keys <= MAX_BATCH_SIZE);
if (num_keys > MAX_LOOKUP_KEYS_ON_STACK) {
lookup_key_heap_buf.reset(new char[sizeof(LookupKey) * num_keys]);
lookup_key_ptr_ = reinterpret_cast<LookupKey*>(
lookup_key_heap_buf.get());
lookup_key_ptr_ = reinterpret_cast<LookupKey*>(lookup_key_heap_buf.get());
}
for (size_t iter = 0; iter != num_keys_; ++iter) {
@ -157,8 +156,9 @@ class MultiGetContext {
private:
static const int MAX_LOOKUP_KEYS_ON_STACK = 16;
alignas(alignof(LookupKey))
char lookup_key_stack_buf[sizeof(LookupKey) * MAX_LOOKUP_KEYS_ON_STACK];
alignas(
alignof(LookupKey)) char lookup_key_stack_buf[sizeof(LookupKey) *
MAX_LOOKUP_KEYS_ON_STACK];
std::array<KeyContext*, MAX_BATCH_SIZE> sorted_keys_;
size_t num_keys_;
Mask value_mask_;
@ -250,8 +250,7 @@ class MultiGetContext {
size_t index_;
};
Range(const Range& mget_range,
const Iterator& first,
Range(const Range& mget_range, const Iterator& first,
const Iterator& last) {
ctx_ = mget_range.ctx_;
if (first == last) {
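The buffer juggling above is a small-buffer optimization: batches up to MAX_LOOKUP_KEYS_ON_STACK live in an aligned on-stack array, larger ones spill to a single heap allocation. A sketch of the pattern under illustrative names (SmallBuf, kStackCap); the buffer is raw storage into which the real code placement-news LookupKey objects:

#include <cstddef>
#include <memory>
#include <new>

template <typename T, size_t kStackCap>
class SmallBuf {
 public:
  explicit SmallBuf(size_t n) {
    if (n > kStackCap) {
      heap_.reset(new char[sizeof(T) * n]);
      ptr_ = reinterpret_cast<T*>(heap_.get());
    } else {
      ptr_ = reinterpret_cast<T*>(stack_);
    }
  }
  T* data() { return ptr_; }

 private:
  alignas(alignof(T)) char stack_[sizeof(T) * kStackCap];
  std::unique_ptr<char[]> heap_;
  T* ptr_ = nullptr;
};

int main() {
  SmallBuf<int, 16> small(4);   // fits: uses the stack buffer
  SmallBuf<int, 16> large(64);  // spills: one heap allocation
  new (small.data()) int(42);   // placement-new, as MultiGetContext does
  new (large.data()) int(7);
  return *small.data() == 42 ? 0 : 1;
}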

View File

@ -4,6 +4,7 @@
// (found in the LICENSE.Apache file in the root directory).
#include "table/persistent_cache_helper.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/format.h"

View File

@ -7,9 +7,9 @@
#include <algorithm>
#include <string>
#include "util/dynamic_bloom.h"
#include "memory/allocator.h"
#include "util/dynamic_bloom.h"
namespace ROCKSDB_NAMESPACE {

View File

@ -82,8 +82,9 @@ PlainTableBuilder::PlainTableBuilder(
index_builder_.reset(new PlainTableIndexBuilder(
&arena_, ioptions, moptions.prefix_extractor.get(), index_sparseness,
hash_table_ratio, huge_page_tlb_size_));
properties_.user_collected_properties
[PlainTablePropertyNames::kBloomVersion] = "1"; // For future use
properties_
.user_collected_properties[PlainTablePropertyNames::kBloomVersion] =
"1"; // For future use
}
properties_.fixed_key_len = user_key_len;
@ -112,8 +113,8 @@ PlainTableBuilder::PlainTableBuilder(
std::string val;
PutFixed32(&val, static_cast<uint32_t>(encoder_.GetEncodingType()));
properties_.user_collected_properties
[PlainTablePropertyNames::kEncodingType] = val;
properties_
.user_collected_properties[PlainTablePropertyNames::kEncodingType] = val;
assert(int_tbl_prop_collector_factories);
for (auto& factory : *int_tbl_prop_collector_factories) {
@ -303,17 +304,13 @@ Status PlainTableBuilder::Finish() {
return status_;
}
void PlainTableBuilder::Abandon() {
closed_ = true;
}
void PlainTableBuilder::Abandon() { closed_ = true; }
uint64_t PlainTableBuilder::NumEntries() const {
return properties_.num_entries;
}
uint64_t PlainTableBuilder::FileSize() const {
return offset_;
}
uint64_t PlainTableBuilder::FileSize() const { return offset_; }
std::string PlainTableBuilder::GetFileChecksum() const {
if (file_ != nullptr) {

View File

@ -7,8 +7,10 @@
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <string>
#include <vector>
#include "db/version_edit.h"
#include "rocksdb/options.h"
#include "rocksdb/status.h"
@ -29,7 +31,7 @@ class TableBuilder;
// The builder class of PlainTable. For description of PlainTable format
// See comments of class PlainTableFactory, where instances of
// PlainTableReader are created.
class PlainTableBuilder: public TableBuilder {
class PlainTableBuilder : public TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the

View File

@ -6,9 +6,10 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <stdint.h>
#include <memory>
#include <string>
#include <stdint.h>
#include "rocksdb/table.h"
@ -177,6 +178,5 @@ class PlainTableFactory : public TableFactory {
PlainTableOptions table_options_;
};
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

View File

@ -19,7 +19,7 @@ inline uint32_t GetBucketIdFromHash(uint32_t hash, uint32_t num_buckets) {
assert(num_buckets > 0);
return hash % num_buckets;
}
}
} // namespace
Status PlainTableIndex::InitFromRawData(Slice data) {
if (!GetVarint32(&data, &index_size_)) {
@ -114,7 +114,7 @@ void PlainTableIndexBuilder::AllocateIndex() {
} else {
double hash_table_size_multipier = 1.0 / hash_table_ratio_;
index_size_ =
static_cast<uint32_t>(num_prefixes_ * hash_table_size_multipier) + 1;
static_cast<uint32_t>(num_prefixes_ * hash_table_size_multipier) + 1;
assert(index_size_ > 0);
}
}
@ -180,7 +180,8 @@ Slice PlainTableIndexBuilder::FillIndexes(
break;
default:
// point to second level indexes.
PutUnaligned(index + i, sub_index_offset | PlainTableIndex::kSubIndexMask);
PutUnaligned(index + i,
sub_index_offset | PlainTableIndex::kSubIndexMask);
char* prev_ptr = &sub_index[sub_index_offset];
char* cur_ptr = EncodeVarint32(prev_ptr, num_keys_for_bucket);
sub_index_offset += static_cast<uint32_t>(cur_ptr - prev_ptr);
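The index arithmetic above is simple to state: AllocateIndex sizes the bucket array as roughly num_prefixes divided by hash_table_ratio, plus one, and GetBucketIdFromHash maps a hash to a bucket by plain modulo. A tiny sketch (BucketId is an illustrative restatement):

#include <cassert>
#include <cstdint>

inline uint32_t BucketId(uint32_t hash, uint32_t num_buckets) {
  assert(num_buckets > 0);
  return hash % num_buckets;
}

int main() {
  // E.g. a hash_table_ratio of 0.75 over 300 prefixes gives about
  // 300 * (1 / 0.75) + 1 = 401 buckets.
  const uint32_t kNumBuckets = 401;
  assert(BucketId(0x12345678u, kNumBuckets) == 0x12345678u % kNumBuckets);
  return 0;
}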

View File

@ -188,8 +188,8 @@ class PlainTableIndexBuilder {
num_records_in_current_group_;
}
IndexRecord* At(size_t index) {
return &(groups_[index / kNumRecordsPerGroup]
[index % kNumRecordsPerGroup]);
return &(
groups_[index / kNumRecordsPerGroup][index % kNumRecordsPerGroup]);
}
private:

View File

@ -8,6 +8,7 @@
#include <algorithm>
#include <string>
#include "db/dbformat.h"
#include "file/writable_file_writer.h"
#include "table/plain/plain_table_factory.h"

View File

@ -11,14 +11,15 @@
#include <vector>
#include "db/dbformat.h"
#include "memory/arena.h"
#include "monitoring/histogram.h"
#include "monitoring/perf_context_imp.h"
#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "table/block_based/block.h"
#include "table/block_based/filter_block.h"
#include "table/format.h"
@ -29,10 +30,6 @@
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_key_coding.h"
#include "table/two_level_iterator.h"
#include "memory/arena.h"
#include "monitoring/histogram.h"
#include "monitoring/perf_context_imp.h"
#include "util/coding.h"
#include "util/dynamic_bloom.h"
#include "util/hash.h"
@ -194,14 +191,12 @@ Status PlainTableReader::Open(
return s;
}
void PlainTableReader::SetupForCompaction() {
}
void PlainTableReader::SetupForCompaction() {}
InternalIterator* PlainTableReader::NewIterator(
const ReadOptions& options, const SliceTransform* /* prefix_extractor */,
Arena* arena, bool /*skip_filters*/, TableReaderCaller /*caller*/,
size_t /*compaction_readahead_size*/,
bool /* allow_unprepared_value */) {
size_t /*compaction_readahead_size*/, bool /* allow_unprepared_value */) {
// Not necessarily used here, but make sure this has been initialized
assert(table_properties_);
@ -640,8 +635,7 @@ PlainTableIterator::PlainTableIterator(PlainTableReader* table,
next_offset_ = offset_ = table_->file_info_.data_end_offset;
}
PlainTableIterator::~PlainTableIterator() {
}
PlainTableIterator::~PlainTableIterator() {}
bool PlainTableIterator::Valid() const {
return offset_ < table_->file_info_.data_end_offset &&
@ -671,9 +665,8 @@ void PlainTableIterator::Seek(const Slice& target) {
// it. This is needed for compaction: it creates iterator with
// total_order_seek = true but usually never does Seek() on it,
// only SeekToFirst().
status_ =
Status::InvalidArgument(
"total_order_seek not implemented for PlainTable.");
status_ = Status::InvalidArgument(
"total_order_seek not implemented for PlainTable.");
offset_ = next_offset_ = table_->file_info_.data_end_offset;
return;
}
@ -754,9 +747,7 @@ void PlainTableIterator::Next() {
}
}
void PlainTableIterator::Prev() {
assert(false);
}
void PlainTableIterator::Prev() { assert(false); }
Slice PlainTableIterator::key() const {
assert(Valid());
@ -768,9 +759,7 @@ Slice PlainTableIterator::value() const {
return value_;
}
Status PlainTableIterator::status() const {
return status_;
}
Status PlainTableIterator::status() const { return status_; }
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

View File

@ -6,12 +6,13 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <unordered_map>
#include <memory>
#include <vector>
#include <string>
#include <stdint.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "file/random_access_file_reader.h"
#include "memory/arena.h"
#include "rocksdb/env.h"
@ -58,14 +59,14 @@ struct PlainTableReaderFileInfo {
// The reader class of PlainTable. For description of PlainTable format
// See comments of class PlainTableFactory, where instances of
// PlainTableReader are created.
class PlainTableReader: public TableReader {
class PlainTableReader : public TableReader {
public:
// Based on the following output file format shown in plain_table_factory.h
// When opening the output file, PlainTableReader creates a hash table
// from key prefixes to offsets in the output file. PlainTable will decide
// whether it points to the data offset of the first key with the key prefix
// or the offset of it. If too many keys share this prefix, it will
// create a binary search-able index from the suffix to offset on disk.
// Based on the following output file format shown in plain_table_factory.h
// When opening the output file, PlainTableReader creates a hash table
// from key prefixes to offsets in the output file. PlainTable will decide
// whether it points to the data offset of the first key with the key prefix
// or the offset of it. If too many keys share this prefix, it will
// create a binary search-able index from the suffix to offset on disk.
static Status Open(const ImmutableOptions& ioptions,
const EnvOptions& env_options,
const InternalKeyComparator& internal_comparator,
@ -165,10 +166,11 @@ class PlainTableReader: public TableReader {
const ImmutableOptions& ioptions_;
std::unique_ptr<Cleanable> dummy_cleanable_;
uint64_t file_size_;
protected: // for testing
std::shared_ptr<const TableProperties> table_properties_;
private:
protected: // for testing
std::shared_ptr<const TableProperties> table_properties_;
private:
bool IsFixedLength() const {
return user_key_len_ != kPlainTableVariableLength;
}
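The class comment describes a two-level lookup: a hash table from key prefixes to file offsets, with a binary-searchable suffix index for crowded prefixes. A toy model of the first level only, under illustrative names (the real reader keeps bucket offsets in an on-disk hash table rather than an in-memory map):

#include <cstdint>
#include <string>
#include <unordered_map>

int main() {
  std::unordered_map<std::string, uint64_t> prefix_to_offset;
  prefix_to_offset["user0001"] = 0;     // first key with this prefix
  prefix_to_offset["user0002"] = 4096;  // next prefix starts here
  // A lookup for a key starting with "user0002" would begin its scan
  // at offset 4096 instead of the start of the file.
  return prefix_to_offset.at("user0002") == 4096 ? 0 : 1;
}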

View File

@ -7,8 +7,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/internal_iterator.h"
#include "port/port.h"
#include "table/internal_iterator.h"
namespace ROCKSDB_NAMESPACE {
class ScopedArenaIterator {
@ -20,7 +20,6 @@ class ScopedArenaIterator {
}
public:
explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
: iter_(iter) {}
@ -50,9 +49,7 @@ class ScopedArenaIterator {
return res;
}
~ScopedArenaIterator() {
reset(nullptr);
}
~ScopedArenaIterator() { reset(nullptr); }
private:
InternalIterator* iter_;
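ScopedArenaIterator's reset() destroys an arena-allocated iterator without freeing its memory, since the arena owns the allocation. A self-contained illustration of that placement-destruction pattern (Obj is a stand-in type):

#include <new>

struct Obj {
  ~Obj() { /* release resources; the memory itself belongs to the arena */ }
};

int main() {
  alignas(Obj) char arena_slot[sizeof(Obj)];  // pretend arena memory
  Obj* it = new (arena_slot) Obj();           // arena-style placement new
  it->~Obj();  // what reset()/~ScopedArenaIterator effectively does
  return 0;
}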

View File

@ -223,9 +223,8 @@ Status SstFileDumper::CalculateCompressedTableSize(
table_options.block_size = block_size;
BlockBasedTableFactory block_based_tf(table_options);
std::unique_ptr<TableBuilder> table_builder;
table_builder.reset(block_based_tf.NewTableBuilder(
tb_options,
dest_writer.get()));
table_builder.reset(
block_based_tf.NewTableBuilder(tb_options, dest_writer.get()));
std::unique_ptr<InternalIterator> iter(table_reader_->NewIterator(
read_options_, moptions_.prefix_extractor.get(), /*arena=*/nullptr,
/*skip_filters=*/false, TableReaderCaller::kSSTDumpTool));

View File

@ -25,7 +25,7 @@ const std::string ExternalSstFilePropertyNames::kGlobalSeqno =
#ifndef ROCKSDB_LITE
const size_t kFadviseTrigger = 1024 * 1024; // 1MB
const size_t kFadviseTrigger = 1024 * 1024; // 1MB
struct SstFileWriter::Rep {
Rep(const EnvOptions& _env_options, const Options& options,
@ -210,8 +210,7 @@ struct SstFileWriter::Rep {
// Fadvise disabled
return s;
}
uint64_t bytes_since_last_fadvise =
builder->FileSize() - last_fadvise_size;
uint64_t bytes_since_last_fadvise = builder->FileSize() - last_fadvise_size;
if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
TEST_SYNC_POINT_CALLBACK("SstFileWriter::Rep::InvalidatePageCache",
&(bytes_since_last_fadvise));
@ -422,9 +421,7 @@ Status SstFileWriter::Finish(ExternalSstFileInfo* file_info) {
return s;
}
uint64_t SstFileWriter::FileSize() {
return rep_->file_info.file_size;
}
uint64_t SstFileWriter::FileSize() { return rep_->file_info.file_size; }
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
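The fadvise bookkeeping above amounts to a simple trigger: invalidate the OS page cache once at least kFadviseTrigger (1MB) of new data has been written, or unconditionally when the file is closing. A sketch under an illustrative name (FadviseTracker):

#include <cstdint>

struct FadviseTracker {
  static constexpr uint64_t kTrigger = 1024 * 1024;  // 1MB, as above
  uint64_t last_size = 0;
  bool ShouldInvalidate(uint64_t file_size, bool closing) {
    const uint64_t since_last = file_size - last_size;
    if (since_last > kTrigger || closing) {
      last_size = file_size;  // remember where the last fadvise happened
      return true;            // caller would issue the fadvise call here
    }
    return false;
  }
};

int main() {
  FadviseTracker t;
  bool a = t.ShouldInvalidate(512 * 1024, false);       // below trigger
  bool b = t.ShouldInvalidate(2 * 1024 * 1024, false);  // above trigger
  return (!a && b) ? 0 : 1;
}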

View File

@ -21,32 +21,25 @@ const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
std::numeric_limits<int32_t>::max();
namespace {
void AppendProperty(
std::string& props,
const std::string& key,
const std::string& value,
const std::string& prop_delim,
const std::string& kv_delim) {
props.append(key);
props.append(kv_delim);
props.append(value);
props.append(prop_delim);
}
template <class TValue>
void AppendProperty(
std::string& props,
const std::string& key,
const TValue& value,
const std::string& prop_delim,
const std::string& kv_delim) {
AppendProperty(props, key, std::to_string(value), prop_delim, kv_delim);
}
void AppendProperty(std::string& props, const std::string& key,
const std::string& value, const std::string& prop_delim,
const std::string& kv_delim) {
props.append(key);
props.append(kv_delim);
props.append(value);
props.append(prop_delim);
}
std::string TableProperties::ToString(
const std::string& prop_delim,
const std::string& kv_delim) const {
template <class TValue>
void AppendProperty(std::string& props, const std::string& key,
const TValue& value, const std::string& prop_delim,
const std::string& kv_delim) {
AppendProperty(props, key, std::to_string(value), prop_delim, kv_delim);
}
} // namespace
std::string TableProperties::ToString(const std::string& prop_delim,
const std::string& kv_delim) const {
std::string result;
result.reserve(1024);
@ -81,8 +74,8 @@ std::string TableProperties::ToString(
if (index_partitions != 0) {
AppendProperty(result, "# index partitions", index_partitions, prop_delim,
kv_delim);
AppendProperty(result, "top-level index size", top_level_index_size, prop_delim,
kv_delim);
AppendProperty(result, "top-level index size", top_level_index_size,
prop_delim, kv_delim);
}
AppendProperty(result, "filter block size", filter_size, prop_delim,
kv_delim);
@ -256,10 +249,8 @@ const std::string TablePropertiesNames::kDbHostId =
"rocksdb.creating.host.identity";
const std::string TablePropertiesNames::kOriginalFileNumber =
"rocksdb.original.file.number";
const std::string TablePropertiesNames::kDataSize =
"rocksdb.data.size";
const std::string TablePropertiesNames::kIndexSize =
"rocksdb.index.size";
const std::string TablePropertiesNames::kDataSize = "rocksdb.data.size";
const std::string TablePropertiesNames::kIndexSize = "rocksdb.index.size";
const std::string TablePropertiesNames::kIndexPartitions =
"rocksdb.index.partitions";
const std::string TablePropertiesNames::kTopLevelIndexSize =
@ -268,16 +259,13 @@ const std::string TablePropertiesNames::kIndexKeyIsUserKey =
"rocksdb.index.key.is.user.key";
const std::string TablePropertiesNames::kIndexValueIsDeltaEncoded =
"rocksdb.index.value.is.delta.encoded";
const std::string TablePropertiesNames::kFilterSize =
"rocksdb.filter.size";
const std::string TablePropertiesNames::kRawKeySize =
"rocksdb.raw.key.size";
const std::string TablePropertiesNames::kFilterSize = "rocksdb.filter.size";
const std::string TablePropertiesNames::kRawKeySize = "rocksdb.raw.key.size";
const std::string TablePropertiesNames::kRawValueSize =
"rocksdb.raw.value.size";
const std::string TablePropertiesNames::kNumDataBlocks =
"rocksdb.num.data.blocks";
const std::string TablePropertiesNames::kNumEntries =
"rocksdb.num.entries";
const std::string TablePropertiesNames::kNumEntries = "rocksdb.num.entries";
const std::string TablePropertiesNames::kNumFilterEntries =
"rocksdb.num.filter_entries";
const std::string TablePropertiesNames::kDeletedKeys = "rocksdb.deleted.keys";
@ -285,8 +273,7 @@ const std::string TablePropertiesNames::kMergeOperands =
"rocksdb.merge.operands";
const std::string TablePropertiesNames::kNumRangeDeletions =
"rocksdb.num.range-deletions";
const std::string TablePropertiesNames::kFilterPolicy =
"rocksdb.filter.policy";
const std::string TablePropertiesNames::kFilterPolicy = "rocksdb.filter.policy";
const std::string TablePropertiesNames::kFormatVersion =
"rocksdb.format.version";
const std::string TablePropertiesNames::kFixedKeyLen =
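As a usage note for the helpers reformatted above: ToString() repeatedly calls AppendProperty to build a single delimited string. A self-contained sketch, assuming the usual "; " and "=" delimiters that ToString() defaults to:

#include <cstdio>
#include <string>

static void AppendProperty(std::string& props, const std::string& key,
                           const std::string& value,
                           const std::string& prop_delim,
                           const std::string& kv_delim) {
  props.append(key);
  props.append(kv_delim);
  props.append(value);
  props.append(prop_delim);
}

int main() {
  std::string result;
  result.reserve(1024);
  AppendProperty(result, "# data blocks", "14", "; ", "=");
  AppendProperty(result, "# entries", "1000", "; ", "=");
  // Prints: # data blocks=14; # entries=1000;
  std::printf("%s\n", result.c_str());
  return 0;
}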

View File

@ -9,6 +9,7 @@
#pragma once
#include <memory>
#include "db/range_tombstone_fragmenter.h"
#if USE_COROUTINES
#include "folly/experimental/coro/Coroutine.h"
@ -161,8 +162,8 @@ class TableReader {
// persists the data on a non volatile storage medium like disk/SSD
virtual Status Prefetch(const Slice* begin = nullptr,
const Slice* end = nullptr) {
(void) begin;
(void) end;
(void)begin;
(void)end;
// Default implementation is NOOP.
// The child class should implement functionality when applicable
return Status::OK();

View File

@ -224,9 +224,10 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
}
}
if (count != r2_len) {
fprintf(
stderr, "Iterator cannot iterate expected number of entries. "
"Expected %d but got %d\n", r2_len, count);
fprintf(stderr,
"Iterator cannot iterate expected number of entries. "
"Expected %d but got %d\n",
r2_len, count);
assert(false);
}
delete iter;
@ -261,16 +262,16 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
} // namespace
} // namespace ROCKSDB_NAMESPACE
DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "
"ones.");
DEFINE_bool(query_empty, false,
"query non-existing keys instead of existing ones.");
DEFINE_int32(num_keys1, 4096, "number of distinct key prefixes");
DEFINE_int32(num_keys2, 512, "number of distinct keys for each prefix");
DEFINE_int32(iter, 3, "number of times to repeat each query");
DEFINE_int32(prefix_len, 16, "Prefix length used for iterators and indexes");
DEFINE_bool(iterator, false, "For test iterator");
DEFINE_bool(through_db, false, "If enabled, a DB instance will be created and "
"the query will run against the DB. Otherwise, it will run directly against "
"a table reader.");
DEFINE_bool(through_db, false,
"If enabled, a DB instance will be created and the query will run "
"against the DB. Otherwise, it will run directly against a table reader.");
DEFINE_bool(mmap_read, true, "Whether to use mmap read");
DEFINE_string(table_factory, "block_based",
"Table factory to use: `block_based` (default), `plain_table` or "

View File

@ -186,7 +186,7 @@ class Constructor {
public:
explicit Constructor(const Comparator* cmp)
: data_(stl_wrappers::LessOfComparator(cmp)) {}
virtual ~Constructor() { }
virtual ~Constructor() {}
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
@ -492,7 +492,7 @@ class TableConstructor : public Constructor {
};
uint64_t TableConstructor::cur_file_num_ = 1;
class MemTableConstructor: public Constructor {
class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
: Constructor(cmp),
@ -566,11 +566,10 @@ class InternalIteratorFromIterator : public InternalIterator {
std::unique_ptr<Iterator> it_;
};
class DBConstructor: public Constructor {
class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
: Constructor(cmp),
comparator_(cmp) {
: Constructor(cmp), comparator_(cmp) {
db_ = nullptr;
NewDB();
}
@ -654,15 +653,15 @@ std::ostream& operator<<(std::ostream& os, const TestArgs& args) {
static std::vector<TestArgs> GenerateArgList() {
std::vector<TestArgs> test_args;
std::vector<TestType> test_types = {
BLOCK_BASED_TABLE_TEST,
std::vector<TestType> test_types = {BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
PLAIN_TABLE_SEMI_FIXED_PREFIX,
PLAIN_TABLE_FULL_STR_PREFIX,
PLAIN_TABLE_TOTAL_ORDER,
PLAIN_TABLE_SEMI_FIXED_PREFIX,
PLAIN_TABLE_FULL_STR_PREFIX,
PLAIN_TABLE_TOTAL_ORDER,
#endif // !ROCKSDB_LITE
BLOCK_TEST,
MEMTABLE_TEST, DB_TEST};
BLOCK_TEST,
MEMTABLE_TEST,
DB_TEST};
std::vector<bool> reverse_compare_types = {false, true};
std::vector<int> restart_intervals = {16, 1, 1024};
std::vector<uint32_t> compression_parallel_threads = {1, 4};
@ -747,9 +746,8 @@ class FixedOrLessPrefixTransform : public SliceTransform {
const size_t prefix_len_;
public:
explicit FixedOrLessPrefixTransform(size_t prefix_len) :
prefix_len_(prefix_len) {
}
explicit FixedOrLessPrefixTransform(size_t prefix_len)
: prefix_len_(prefix_len) {}
const char* Name() const override { return "rocksdb.FixedPrefix"; }
@ -964,8 +962,8 @@ class HarnessTest : public testing::Test {
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
if (kVerbose) fprintf(stderr, "Seek '%s'\n",
EscapeString(key).c_str());
if (kVerbose)
fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_OK(iter->status());
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@ -978,7 +976,7 @@ class HarnessTest : public testing::Test {
iter->Prev();
ASSERT_OK(iter->status());
if (model_iter == data.begin()) {
model_iter = data.end(); // Wrap around to invalid value
model_iter = data.end(); // Wrap around to invalid value
} else {
--model_iter;
}
@ -1047,14 +1045,14 @@ class HarnessTest : public testing::Test {
break;
case 1: {
// Attempt to return something smaller than an existing key
if (result.size() > 0 && result[result.size() - 1] > '\0'
&& (!only_support_prefix_seek_
|| options_.prefix_extractor->Transform(result).size()
< result.size())) {
if (result.size() > 0 && result[result.size() - 1] > '\0' &&
(!only_support_prefix_seek_ ||
options_.prefix_extractor->Transform(result).size() <
result.size())) {
result[result.size() - 1]--;
}
break;
}
case 2: {
// Return something larger than an existing key
Increment(options_.comparator, &result);
@ -1103,8 +1101,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
(unsigned long long)(val),
(unsigned long long)(low),
(unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
@ -1183,8 +1180,8 @@ class BlockBasedTableTest
{
std::unique_ptr<TraceReader> trace_reader;
Status s =
NewFileTraceReader(env_, EnvOptions(), trace_file_path_, &trace_reader);
Status s = NewFileTraceReader(env_, EnvOptions(), trace_file_path_,
&trace_reader);
EXPECT_OK(s);
BlockCacheTraceReader reader(std::move(trace_reader));
BlockCacheTraceHeader header;
@ -1249,8 +1246,7 @@ class BBTTailPrefetchTest : public TableTest {};
class FileChecksumTestHelper {
public:
FileChecksumTestHelper(bool convert_to_internal_key = false)
: convert_to_internal_key_(convert_to_internal_key) {
}
: convert_to_internal_key_(convert_to_internal_key) {}
~FileChecksumTestHelper() {}
void CreateWritableFile() {
@ -1368,22 +1364,18 @@ INSTANTIATE_TEST_CASE_P(FormatVersions, BlockBasedTableTest,
// This test serves as the living tutorial for the prefix scan of user collected
// properties.
TEST_F(TablePropertyTest, PrefixScanTest) {
UserCollectedProperties props{{"num.111.1", "1"},
{"num.111.2", "2"},
{"num.111.3", "3"},
{"num.333.1", "1"},
{"num.333.2", "2"},
{"num.333.3", "3"},
{"num.555.1", "1"},
{"num.555.2", "2"},
{"num.555.3", "3"}, };
UserCollectedProperties props{
{"num.111.1", "1"}, {"num.111.2", "2"}, {"num.111.3", "3"},
{"num.333.1", "1"}, {"num.333.2", "2"}, {"num.333.3", "3"},
{"num.555.1", "1"}, {"num.555.2", "2"}, {"num.555.3", "3"},
};
// prefixes that exist
for (const std::string prefix : {"num.111", "num.333", "num.555"}) {
int num = 0;
for (auto pos = props.lower_bound(prefix);
pos != props.end() &&
pos->first.compare(0, prefix.size(), prefix) == 0;
pos->first.compare(0, prefix.size(), prefix) == 0;
++pos) {
++num;
auto key = prefix + "." + std::to_string(num);
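Since the test above bills itself as a living tutorial, the idiom is worth restating on its own: lower_bound() positions at the first key not less than the prefix, and iteration continues while compare(0, prefix.size(), prefix) == 0. A self-contained version over std::map (UserCollectedProperties is such an ordered string map):

#include <map>
#include <string>

int CountWithPrefix(const std::map<std::string, std::string>& props,
                    const std::string& prefix) {
  int num = 0;
  for (auto pos = props.lower_bound(prefix);
       pos != props.end() &&
       pos->first.compare(0, prefix.size(), prefix) == 0;
       ++pos) {
    ++num;
  }
  return num;
}

int main() {
  const std::map<std::string, std::string> props{
      {"num.111.1", "1"}, {"num.111.2", "2"}, {"num.333.1", "1"}};
  return CountWithPrefix(props, "num.111") == 2 ? 0 : 1;
}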
@ -2031,7 +2023,6 @@ TEST_P(BlockBasedTableTest, PrefetchTest) {
// [ k05 ] k05
// [ k06 k07 ] k07
// Simple
PrefetchRange(&c, &opt, &table_options,
/*key_range=*/"k01", "k05",
@ -2069,35 +2060,35 @@ TEST_P(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
// Make each key/value an individual block
table_options.block_size = 64;
switch (i) {
case 0:
// Binary search index
table_options.index_type = BlockBasedTableOptions::kBinarySearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
case 1:
// Hash search index
table_options.index_type = BlockBasedTableOptions::kHashSearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(4));
break;
case 2:
// Hash search index with filter policy
table_options.index_type = BlockBasedTableOptions::kHashSearch;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(4));
break;
case 3:
// Two-level index
table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
case 4:
// Binary search with first key
table_options.index_type =
BlockBasedTableOptions::kBinarySearchWithFirstKey;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
case 0:
// Binary search index
table_options.index_type = BlockBasedTableOptions::kBinarySearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
case 1:
// Hash search index
table_options.index_type = BlockBasedTableOptions::kHashSearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(4));
break;
case 2:
// Hash search index with filter policy
table_options.index_type = BlockBasedTableOptions::kHashSearch;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(4));
break;
case 3:
// Two-level index
table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
case 4:
// Binary search with first key
table_options.index_type =
BlockBasedTableOptions::kBinarySearchWithFirstKey;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
break;
}
TableConstructor c(BytewiseComparator(),
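Each case above is the same three-step setup: pick an index_type on BlockBasedTableOptions, install the table factory, and (for hash search) provide a prefix extractor. For reference, case 2 written as a plain snippet against the public API, using NewBlockBasedTableFactory rather than the test's direct constructor:

#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  rocksdb::BlockBasedTableOptions table_options;
  table_options.index_type = rocksdb::BlockBasedTableOptions::kHashSearch;
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  // Hash-search indexes need a prefix extractor, as in cases 1 and 2 above.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
  return 0;
}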
@ -2452,7 +2443,12 @@ void TableTest::IndexTest(BlockBasedTableOptions table_options) {
}
// find the upper bound of prefixes
std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };
std::vector<std::string> upper_bound = {
keys[1],
keys[2],
keys[7],
keys[9],
};
// find existing keys
for (const auto& item : kvmap) {
@ -3969,19 +3965,19 @@ TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) {
c.Finish(options, ioptions, moptions, table_options, internal_comparator,
&keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
// k04 and k05 will be in two consecutive blocks, the index is
// an arbitrary slice between k04 and k05, either before or after k04a
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 10000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
c.ResetTableReader();
}
@ -4045,8 +4041,7 @@ TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
if (!XPRESS_Supported()) {
fprintf(stderr, "skipping xpress and xpress compression tests\n");
}
else {
} else {
compression_state.push_back(kXpressCompression);
}

View File

@ -8,6 +8,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/two_level_iterator.h"
#include "db/pinned_iterators_manager.h"
#include "memory/arena.h"
#include "rocksdb/options.h"

View File

@ -8,8 +8,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "rocksdb/iterator.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "table/iterator_wrapper.h"
namespace ROCKSDB_NAMESPACE {