// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <assert.h>

#include <cinttypes>
#include <list>
#include <string>
#include <unordered_map>

#include "rocksdb/comparator.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_builder.h"
#include "table/format.h"

namespace ROCKSDB_NAMESPACE {
// The interface for building an index.
// Instructions for adding a new concrete IndexBuilder:
//  1. Create a subclass derived from IndexBuilder.
//  2. Add a new entry associated with that subclass in
//     BlockBasedTableOptions::IndexType.
//  3. Add a create function for the new subclass in CreateIndexBuilder.
// A sketch of these steps is shown below.
// Note: we could devise a more advanced design to simplify the process of
// adding a new subclass, but that would increase code complexity and draw
// unwanted attention from readers. Given that we won't add or change index
// types frequently, it makes sense to embrace a straightforward design that
// just works.
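//
// For illustration only, a hypothetical new builder might look roughly like
// this (kMyIndex and MyIndexBuilder are made-up names; the overrides shown
// are the pure-virtual methods declared on IndexBuilder below):
//
//   class MyIndexBuilder : public IndexBuilder {
//    public:
//     void AddIndexEntry(std::string* last_key_in_current_block,
//                        const Slice* first_key_in_next_block,
//                        const BlockHandle& block_handle) override;
//     Status Finish(IndexBlocks* index_blocks,
//                   const BlockHandle& last_partition_block_handle) override;
//     size_t IndexSize() const override;
//   };
//
// plus a new enum value (e.g. kMyIndex) in BlockBasedTableOptions::IndexType
// and a matching case in IndexBuilder::CreateIndexBuilder that returns
// new MyIndexBuilder(...).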
class IndexBuilder {
 public:
  static IndexBuilder* CreateIndexBuilder(
      BlockBasedTableOptions::IndexType index_type,
      const ROCKSDB_NAMESPACE::InternalKeyComparator* comparator,
      const InternalKeySliceTransform* int_key_slice_transform,
      const bool use_value_delta_encoding,
      const BlockBasedTableOptions& table_opt);

  // Index builder will construct a set of blocks which contain:
  //  1. One primary index block.
  //  2. (Optional) a set of metablocks that contain the metadata of the
  //     primary index.
  struct IndexBlocks {
    Slice index_block_contents;
    std::unordered_map<std::string, Slice> meta_blocks;
  };
  explicit IndexBuilder(const InternalKeyComparator* comparator)
      : comparator_(comparator) {}

  virtual ~IndexBuilder() {}

  // Add a new index entry to the index block.
  // To allow further optimization, we provide `last_key_in_current_block` and
  // `first_key_in_next_block`, based on which the specific implementation can
  // determine the best index key to be used for the index block.
  // Called before the OnKeyAdded() call for first_key_in_next_block.
  // @last_key_in_current_block: this parameter may be overridden with the
  //                             value of a "substitute key".
  // @first_key_in_next_block: it will be nullptr if the entry being added is
  //                           the last one in the table.
  //
  // REQUIRES: Finish() has not yet been called.
  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) = 0;
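
  // A rough sketch of how a table builder is expected to drive OnKeyAdded()
  // (below) and AddIndexEntry() (above); the loop itself lives outside this
  // file, and `index_builder`, `handle` and the key variables are
  // illustrative only:
  //
  //   for each key appended to the table:
  //     index_builder->OnKeyAdded(key);
  //   when a data block is cut and written at `handle`:
  //     std::string last_key = /* last key in the finished data block */;
  //     index_builder->AddIndexEntry(&last_key, &first_key_in_next_block,
  //                                  handle);
  //   for the final data block, pass nullptr as first_key_in_next_block.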

  // This method will be called whenever a key is added. The subclasses may
  // override OnKeyAdded() if they need to collect additional information.
  virtual void OnKeyAdded(const Slice& /*key*/) {}

  // Inform the index builder that all entries have been written. The block
  // builder may therefore perform any operation required for block
  // finalization.
  //
  // REQUIRES: Finish() has not yet been called.
  inline Status Finish(IndexBlocks* index_blocks) {
    // Throw away the changes to last_partition_block_handle. It has no effect
    // on the first call to Finish anyway.
    BlockHandle last_partition_block_handle;
    return Finish(index_blocks, last_partition_block_handle);
  }

  // This variant of Finish can be utilized to build the 2nd level index in
  // PartitionedIndexBuilder.
  //
  // index_blocks will be filled with the resulting index data. If the return
  // value is Status::Incomplete(), the index is partitioned and the caller
  // should keep calling Finish until Status::OK() is returned. In that case,
  // last_partition_block_handle is the handle of the block written with the
  // result of the previous call to Finish. This can be utilized to build the
  // second level index pointing to each block of partitioned indexes. The
  // last call to Finish() that returns Status::OK() populates index_blocks
  // with the 2nd level index content.
  virtual Status Finish(IndexBlocks* index_blocks,
                        const BlockHandle& last_partition_block_handle) = 0;
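
  // A sketch of the expected calling pattern for a partitioned index (the
  // surrounding write logic and the WritePartitionBlock helper are
  // illustrative, not part of this API):
  //
  //   IndexBuilder::IndexBlocks blocks;
  //   BlockHandle last_handle;  // ignored on the first call
  //   Status s = builder->Finish(&blocks, last_handle);
  //   while (s.IsIncomplete()) {
  //     last_handle = WritePartitionBlock(blocks.index_block_contents);
  //     s = builder->Finish(&blocks, last_handle);
  //   }
  //   // On Status::OK(), blocks holds the top-level (2nd level) index.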

  // Get the size for index block. Must be called after ::Finish.
  virtual size_t IndexSize() const = 0;

  virtual bool seperator_is_key_plus_seq() { return true; }

 protected:
  const InternalKeyComparator* comparator_;
  // Set after ::Finish is called
  size_t index_size_ = 0;
};

// This index builder builds a space-efficient index block.
//
// Optimizations:
//  1. Make the block's `block_restart_interval` 1, which avoids linear search
//     when doing an index lookup (can be disabled by setting
//     index_block_restart_interval).
//  2. Shorten the key length for the index block. Rather than honestly using
//     the last key in the data block as the index key, we instead find a
//     shorter substitute key that serves the same function.
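//
// An illustration of optimization 2 with made-up user keys (the real code
// operates on internal keys via FindShortestInternalKeySeparator below; this
// only shows the idea for a bytewise comparator): if the last key of a data
// block is "the quick brown fox" and the first key of the next block is
// "the who", the index entry only needs a separator that sorts between the
// two, such as "the r", which is much shorter than the full last key.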
class ShortenedIndexBuilder : public IndexBuilder {
 public:
  explicit ShortenedIndexBuilder(
      const InternalKeyComparator* comparator,
      const int index_block_restart_interval, const uint32_t format_version,
      const bool use_value_delta_encoding,
      BlockBasedTableOptions::IndexShorteningMode shortening_mode,
      bool include_first_key)
      : IndexBuilder(comparator),
        index_block_builder_(index_block_restart_interval,
                             true /*use_delta_encoding*/,
                             use_value_delta_encoding),
        index_block_builder_without_seq_(index_block_restart_interval,
                                         true /*use_delta_encoding*/,
                                         use_value_delta_encoding),
        use_value_delta_encoding_(use_value_delta_encoding),
        include_first_key_(include_first_key),
        shortening_mode_(shortening_mode) {
    // Making the default true will disable the feature for old versions
    seperator_is_key_plus_seq_ = (format_version <= 2);
  }

  virtual void OnKeyAdded(const Slice& key) override {
    if (include_first_key_ && current_block_first_internal_key_.empty()) {
      current_block_first_internal_key_.assign(key.data(), key.size());
    }
  }

  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) override {
    if (first_key_in_next_block != nullptr) {
      if (shortening_mode_ !=
          BlockBasedTableOptions::IndexShorteningMode::kNoShortening) {
        FindShortestInternalKeySeparator(*comparator_->user_comparator(),
                                         last_key_in_current_block,
                                         *first_key_in_next_block);
      }
      if (!seperator_is_key_plus_seq_ &&
          comparator_->user_comparator()->Compare(
              ExtractUserKey(*last_key_in_current_block),
              ExtractUserKey(*first_key_in_next_block)) == 0) {
        seperator_is_key_plus_seq_ = true;
      }
    } else {
      if (shortening_mode_ == BlockBasedTableOptions::IndexShorteningMode::
                                  kShortenSeparatorsAndSuccessor) {
        FindShortInternalKeySuccessor(*comparator_->user_comparator(),
                                      last_key_in_current_block);
      }
    }
    auto sep = Slice(*last_key_in_current_block);

    assert(!include_first_key_ || !current_block_first_internal_key_.empty());
    IndexValue entry(block_handle, current_block_first_internal_key_);
    std::string encoded_entry;
    std::string delta_encoded_entry;
    entry.EncodeTo(&encoded_entry, include_first_key_, nullptr);
    if (use_value_delta_encoding_ && !last_encoded_handle_.IsNull()) {
      entry.EncodeTo(&delta_encoded_entry, include_first_key_,
                     &last_encoded_handle_);
    } else {
      // If it's the first block, or delta encoding is disabled,
      // BlockBuilder::Add() below won't use delta-encoded slice.
    }
    last_encoded_handle_ = block_handle;
    const Slice delta_encoded_entry_slice(delta_encoded_entry);
    index_block_builder_.Add(sep, encoded_entry, &delta_encoded_entry_slice);
    if (!seperator_is_key_plus_seq_) {
      index_block_builder_without_seq_.Add(ExtractUserKey(sep), encoded_entry,
                                           &delta_encoded_entry_slice);
    }

    current_block_first_internal_key_.clear();
  }

  using IndexBuilder::Finish;
  virtual Status Finish(
      IndexBlocks* index_blocks,
      const BlockHandle& /*last_partition_block_handle*/) override {
    if (seperator_is_key_plus_seq_) {
      index_blocks->index_block_contents = index_block_builder_.Finish();
    } else {
      index_blocks->index_block_contents =
          index_block_builder_without_seq_.Finish();
    }
    index_size_ = index_blocks->index_block_contents.size();
    return Status::OK();
  }

  virtual size_t IndexSize() const override { return index_size_; }

  virtual bool seperator_is_key_plus_seq() override {
    return seperator_is_key_plus_seq_;
  }

  // If *start < limit, changes *start to a short internal key in
  // [*start, limit) that can serve as a separator between the two.
  static void FindShortestInternalKeySeparator(const Comparator& comparator,
                                               std::string* start,
                                               const Slice& limit);

  // Changes *key to a short internal key >= *key.
  static void FindShortInternalKeySuccessor(const Comparator& comparator,
                                            std::string* key);
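
  // Illustration of FindShortInternalKeySuccessor with a made-up user key
  // (the actual function operates on internal keys): for a bytewise
  // comparator, the successor of "apple" can be shortened to "b". This path
  // is used for the last data block of the table, where there is no next key
  // to build a separator against (see AddIndexEntry above).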

  friend class PartitionedIndexBuilder;

 private:
  BlockBuilder index_block_builder_;
  BlockBuilder index_block_builder_without_seq_;
  const bool use_value_delta_encoding_;
  bool seperator_is_key_plus_seq_;
  const bool include_first_key_;
  BlockBasedTableOptions::IndexShorteningMode shortening_mode_;
  BlockHandle last_encoded_handle_ = BlockHandle::NullBlockHandle();
  std::string current_block_first_internal_key_;
};

// HashIndexBuilder contains a binary-searchable primary index and the
// metadata for secondary hash index construction.
// The metadata for the hash index consists of two parts:
//  - a metablock that compactly contains a sequence of prefixes. All prefixes
//    are stored consecutively without any per-prefix metadata (such as prefix
//    sizes); that metadata is kept in the other metablock.
//  - a metablock that contains the metadata of the prefixes, including prefix
//    size, restart index and the number of blocks it spans. The format looks
//    like:
//
// +-----------------+---------------------------+---------------------+
// <=prefix 1
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+
// <=prefix 2
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+
// |                                                                   |
// |                               ....                                |
// |                                                                   |
// +-----------------+---------------------------+---------------------+
// <=prefix n
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+
//
// These two metablocks are separated so that the first metablock can be
// reused efficiently during hash index construction without unnecessary data
// copies or small heap allocations for the prefixes.
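//
// A small worked example with made-up prefixes (see FlushPendingPrefix()
// below for the actual encoding, which writes the three values per prefix as
// varint32s): if prefix "app" covers data blocks 0 and 1 and prefix "bar"
// covers data block 2, the first metablock holds the bytes "appbar" and the
// second holds the triples (length=3, restart index=0, num-blocks=2) and
// (length=3, restart index=2, num-blocks=1).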
class HashIndexBuilder : public IndexBuilder {
 public:
  explicit HashIndexBuilder(
      const InternalKeyComparator* comparator,
      const SliceTransform* hash_key_extractor,
      int index_block_restart_interval, int format_version,
      bool use_value_delta_encoding,
      BlockBasedTableOptions::IndexShorteningMode shortening_mode)
      : IndexBuilder(comparator),
        primary_index_builder_(comparator, index_block_restart_interval,
                               format_version, use_value_delta_encoding,
                               shortening_mode, /* include_first_key */ false),
        hash_key_extractor_(hash_key_extractor) {}

  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) override {
    ++current_restart_index_;
    primary_index_builder_.AddIndexEntry(last_key_in_current_block,
                                         first_key_in_next_block, block_handle);
  }

  virtual void OnKeyAdded(const Slice& key) override {
    auto key_prefix = hash_key_extractor_->Transform(key);
    bool is_first_entry = pending_block_num_ == 0;

    // Keys may share the prefix
    if (is_first_entry || pending_entry_prefix_ != key_prefix) {
      if (!is_first_entry) {
        FlushPendingPrefix();
      }

      // Need a hard copy; otherwise the underlying data changes all the time.
      // TODO(kailiu) this copy is expensive; we may be able to speed things
      // up by avoiding it.
      pending_entry_prefix_ = key_prefix.ToString();
      pending_block_num_ = 1;
      pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
    } else {
      // The block count increments when keys sharing the prefix reside in
      // different data blocks.
      auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1;
      assert(last_restart_index <= current_restart_index_);
      if (last_restart_index != current_restart_index_) {
        ++pending_block_num_;
      }
    }
  }

  virtual Status Finish(
      IndexBlocks* index_blocks,
      const BlockHandle& last_partition_block_handle) override {
    if (pending_block_num_ != 0) {
      FlushPendingPrefix();
    }
    Status s = primary_index_builder_.Finish(index_blocks,
                                             last_partition_block_handle);
    index_blocks->meta_blocks.insert(
        {kHashIndexPrefixesBlock.c_str(), prefix_block_});
    index_blocks->meta_blocks.insert(
        {kHashIndexPrefixesMetadataBlock.c_str(), prefix_meta_block_});
    return s;
  }

  virtual size_t IndexSize() const override {
    return primary_index_builder_.IndexSize() + prefix_block_.size() +
           prefix_meta_block_.size();
  }

  virtual bool seperator_is_key_plus_seq() override {
    return primary_index_builder_.seperator_is_key_plus_seq();
  }

 private:
  void FlushPendingPrefix() {
    prefix_block_.append(pending_entry_prefix_.data(),
                         pending_entry_prefix_.size());
    PutVarint32Varint32Varint32(
        &prefix_meta_block_,
        static_cast<uint32_t>(pending_entry_prefix_.size()),
        pending_entry_index_, pending_block_num_);
  }

  ShortenedIndexBuilder primary_index_builder_;
  const SliceTransform* hash_key_extractor_;

  // stores a sequence of prefixes
  std::string prefix_block_;
  // stores the metadata of prefixes
  std::string prefix_meta_block_;

  // The following 3 variables keep the unflushed prefix and its metadata.
  // The details of block_num and entry_index can be found in
  // "block_hash_index.{h,cc}"
  uint32_t pending_block_num_ = 0;
  uint32_t pending_entry_index_ = 0;
  std::string pending_entry_prefix_;

  uint64_t current_restart_index_ = 0;
};

/**
 * IndexBuilder for two-level indexing. Internally it creates a new index for
 * each partition and finishes them in order as Finish is called on it
 * repeatedly until Status::OK() is returned.
 *
 * The format on disk would be I I I I I I IP, where each I is a block
 * containing a partition of the index built using ShortenedIndexBuilder and
 * IP is a block containing a secondary index on the partitions, also built
 * using ShortenedIndexBuilder.
 */
class PartitionedIndexBuilder : public IndexBuilder {
 public:
  static PartitionedIndexBuilder* CreateIndexBuilder(
      const ROCKSDB_NAMESPACE::InternalKeyComparator* comparator,
      const bool use_value_delta_encoding,
      const BlockBasedTableOptions& table_opt);

  explicit PartitionedIndexBuilder(const InternalKeyComparator* comparator,
                                   const BlockBasedTableOptions& table_opt,
                                   const bool use_value_delta_encoding);

  virtual ~PartitionedIndexBuilder();

  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) override;

  virtual Status Finish(
      IndexBlocks* index_blocks,
      const BlockHandle& last_partition_block_handle) override;

  virtual size_t IndexSize() const override { return index_size_; }
  size_t TopLevelIndexSize(uint64_t) const { return top_level_index_size_; }
  size_t NumPartitions() const;

  inline bool ShouldCutFilterBlock() {
    // Current policy is to align the partitions of index and filters
    if (cut_filter_block) {
      cut_filter_block = false;
      return true;
    }
    return false;
  }

  std::string& GetPartitionKey() { return sub_index_last_key_; }

  // Called when an external entity (such as the filter partition builder)
  // requests cutting the next partition.
  void RequestPartitionCut();

  virtual bool seperator_is_key_plus_seq() override {
    return seperator_is_key_plus_seq_;
  }

  bool get_use_value_delta_encoding() { return use_value_delta_encoding_; }

 private:
  // Set after ::Finish is called
  size_t top_level_index_size_ = 0;
  // Set after ::Finish is called
  size_t partition_cnt_ = 0;

  void MakeNewSubIndexBuilder();

  struct Entry {
    std::string key;
    std::unique_ptr<ShortenedIndexBuilder> value;
  };
  std::list<Entry> entries_;  // list of partitioned indexes and their keys
  BlockBuilder index_block_builder_;              // top-level index builder
  BlockBuilder index_block_builder_without_seq_;  // same for user keys
  // the active partition index builder
  ShortenedIndexBuilder* sub_index_builder_;
  // the last key in the active partition index builder
  std::string sub_index_last_key_;
  std::unique_ptr<FlushBlockPolicy> flush_policy_;
  // true if Finish has been called once but is not complete yet.
  bool finishing_indexes = false;
  const BlockBasedTableOptions& table_opt_;
  bool seperator_is_key_plus_seq_;
  bool use_value_delta_encoding_;
  // true if an external entity (such as the filter partition builder) has
  // requested cutting the next partition
  bool partition_cut_requested_ = true;
  // true if it should cut the next filter partition block
  bool cut_filter_block = false;
  BlockHandle last_encoded_handle_;
};
}  // namespace ROCKSDB_NAMESPACE