// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include <stddef.h>
#include <stdint.h>

#include <string>
#include <vector>

#include "db/kv_checksum.h"
#include "db/pinned_iterators_manager.h"
#include "port/malloc.h"
#include "rocksdb/advanced_cache.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/data_block_hash_index.h"
#include "table/format.h"
#include "table/internal_iterator.h"
#include "test_util/sync_point.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

struct BlockContents;
class Comparator;
template <class TValue>
class BlockIter;
class DataBlockIter;
class IndexBlockIter;
class MetaBlockIter;
class BlockPrefixIndex;

// BlockReadAmpBitmap is a bitmap that maps the ROCKSDB_NAMESPACE::Block data
// bytes to a bitmap with ratio bytes_per_bit. Whenever we access a range of
// bytes in the Block we update the bitmap and increment
// READ_AMP_ESTIMATE_USEFUL_BYTES.
class BlockReadAmpBitmap {
 public:
  explicit BlockReadAmpBitmap(size_t block_size, size_t bytes_per_bit,
                              Statistics* statistics)
      : bitmap_(nullptr),
        bytes_per_bit_pow_(0),
        statistics_(statistics),
        rnd_(Random::GetTLSInstance()->Uniform(
            static_cast<int>(bytes_per_bit))) {
    TEST_SYNC_POINT_CALLBACK("BlockReadAmpBitmap:rnd", &rnd_);
    assert(block_size > 0 && bytes_per_bit > 0);

    // Convert bytes_per_bit to its power-of-2 exponent
    // (bytes_per_bit_pow_ = floor(log2(bytes_per_bit)))
    while (bytes_per_bit >>= 1) {
      bytes_per_bit_pow_++;
    }

    // num_bits_needed = ceil(block_size / bytes_per_bit)
    size_t num_bits_needed = ((block_size - 1) >> bytes_per_bit_pow_) + 1;
    assert(num_bits_needed > 0);

    // bitmap_size = ceil(num_bits_needed / kBitsPerEntry)
    size_t bitmap_size = (num_bits_needed - 1) / kBitsPerEntry + 1;

    // Create bitmap and set all the bits to 0
    bitmap_ = new std::atomic<uint32_t>[bitmap_size]();

    RecordTick(GetStatistics(), READ_AMP_TOTAL_READ_BYTES, block_size);
  }
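
  // Worked example for the constructor above (illustrative numbers): with
  // block_size = 4096 and bytes_per_bit = 32, bytes_per_bit_pow_ becomes 5
  // (32 == 1 << 5), num_bits_needed = ((4096 - 1) >> 5) + 1 = 128, and
  // bitmap_size = (128 - 1) / 32 + 1 = 4, i.e. four 32-bit atomic entries
  // cover the whole block.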

  ~BlockReadAmpBitmap() { delete[] bitmap_; }

  void Mark(uint32_t start_offset, uint32_t end_offset) {
    assert(end_offset >= start_offset);
    // Index of first bit in mask
    uint32_t start_bit =
        (start_offset + (1 << bytes_per_bit_pow_) - rnd_ - 1) >>
        bytes_per_bit_pow_;
    // Index of last bit in mask + 1
    uint32_t exclusive_end_bit =
        (end_offset + (1 << bytes_per_bit_pow_) - rnd_) >> bytes_per_bit_pow_;
    if (start_bit >= exclusive_end_bit) {
      return;
    }
    assert(exclusive_end_bit > 0);

    if (GetAndSet(start_bit) == 0) {
      uint32_t new_useful_bytes = (exclusive_end_bit - start_bit)
                                  << bytes_per_bit_pow_;
      RecordTick(GetStatistics(), READ_AMP_ESTIMATE_USEFUL_BYTES,
                 new_useful_bytes);
    }
  }
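
  // Worked example for Mark() (illustrative numbers): with bytes_per_bit = 32
  // (so bytes_per_bit_pow_ = 5) and a sampled offset rnd_ = 7, Mark(100, 200)
  // computes start_bit = (100 + 32 - 7 - 1) >> 5 = 3 and exclusive_end_bit =
  // (200 + 32 - 7) >> 5 = 7; bits 3..6 stand for the sampled bytes 103, 135,
  // 167 and 199 inside [100, 200). Only start_bit is fetched-and-set, and the
  // estimate (7 - 3) << 5 = 128 bytes is recorded once, when that bit first
  // transitions from 0 to 1.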

  Statistics* GetStatistics() {
    return statistics_.load(std::memory_order_relaxed);
  }

  void SetStatistics(Statistics* stats) { statistics_.store(stats); }

  uint32_t GetBytesPerBit() { return 1 << bytes_per_bit_pow_; }

  size_t ApproximateMemoryUsage() const {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    return malloc_usable_size((void*)this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return sizeof(*this);
  }

 private:
  // Get the current value of the bit at `bit_idx` and set it to 1
  inline bool GetAndSet(uint32_t bit_idx) {
    const uint32_t byte_idx = bit_idx / kBitsPerEntry;
    const uint32_t bit_mask = 1 << (bit_idx % kBitsPerEntry);

    return bitmap_[byte_idx].fetch_or(bit_mask, std::memory_order_relaxed) &
           bit_mask;
  }
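
  // For example, bit_idx = 37 lands in entry byte_idx = 37 / 32 = 1 with
  // bit_mask = 1 << (37 % 32) = 1 << 5; the relaxed fetch_or sets the bit and
  // reports whether it was already set, so concurrent calls marking the same
  // range are only counted once.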

  const uint32_t kBytesPersEntry = sizeof(uint32_t);   // 4 bytes
  const uint32_t kBitsPerEntry = kBytesPersEntry * 8;  // 32 bits

  // Bitmap used to record the bytes that we read; atomics protect against
  // multiple threads updating the same bit
  std::atomic<uint32_t>* bitmap_;
  // (1 << bytes_per_bit_pow_) is bytes_per_bit. Use a power of 2 to optimize
  // multiplication and division
  uint8_t bytes_per_bit_pow_;
  // Pointer to the DB Statistics object. Since this bitmap may outlive the DB,
  // this pointer may be invalid, but the DB will update it to a valid pointer
  // by using SetStatistics() before calling Mark()
  std::atomic<Statistics*> statistics_;
  uint32_t rnd_;
};
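
// Example (illustrative sketch, not part of this header): tracking read
// amplification for one 4 KB block at 32 bytes per bit; `stats` is assumed to
// be the owning DB's Statistics object.
//
//   BlockReadAmpBitmap read_amp_bitmap(/*block_size=*/4096,
//                                      /*bytes_per_bit=*/32, stats);
//   // An entry spanning bytes [100, 200) of the block was accessed:
//   read_amp_bitmap.Mark(/*start_offset=*/100, /*end_offset=*/200);
//   // READ_AMP_TOTAL_READ_BYTES was bumped by 4096 at construction;
//   // READ_AMP_ESTIMATE_USEFUL_BYTES grows as bits get set, so the ratio of
//   // the two approximates the block's read amplification.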

// class Block is the uncompressed and "parsed" form for blocks containing
// key-value pairs. (See BlockContents comments for more on terminology.)
// This includes the in-memory representation of data blocks, index blocks
// (including partitions), range deletion blocks, properties blocks, metaindex
// blocks, as well as the top level of the partitioned filter structure (which
// is actually an index of the filter partitions). It is NOT suitable for
// compressed blocks in general, filter blocks/partitions, or compression
// dictionaries.
//
// See https://github.com/facebook/rocksdb/wiki/Rocksdb-BlockBasedTable-Format
// for details of the format and the various block types.
//
// TODO: Rename to ParsedKvBlock?
class Block {
 public:
  // Initialize the block with the specified contents.
  explicit Block(BlockContents&& contents, size_t read_amp_bytes_per_bit = 0,
                 Statistics* statistics = nullptr);
  // No copying allowed
  Block(const Block&) = delete;
  void operator=(const Block&) = delete;

  ~Block();

  size_t size() const { return size_; }
  const char* data() const { return data_; }
  // The additional memory space taken by the block data.
  size_t usable_size() const { return contents_.usable_size(); }
  uint32_t NumRestarts() const;
  bool own_bytes() const { return contents_.own_bytes(); }

  BlockBasedTableOptions::DataBlockIndexType IndexType() const;

  // raw_ucmp is a raw (i.e., not wrapped by `UserComparatorWrapper`) user key
  // comparator.
  //
  // If iter is null, return a new Iterator.
  // If iter is not null, update this one and return it as Iterator*.
  //
  // Updates read_amp_bitmap_ if it is not nullptr.
  //
  // If `block_contents_pinned` is true, the caller will guarantee that when
  // the cleanup functions are transferred from the iterator to other
  // classes, e.g. PinnableSlice, the pointer to the bytes will still be
  // valid. Either the iterator holds a cache handle or ownership of some
  // resource and releases them in a release function, or the caller is sure
  // that the data will not go away (for example, it's from an mmapped file
  // which will not be closed).
  //
  // `user_defined_timestamps_persisted` controls whether a min timestamp is
  // padded while the key is being parsed from the block.
  //
  // NOTE: for the hash based lookup, if a key prefix doesn't match any key,
  // the iterator will simply be set as "invalid", rather than returning
  // the key that is just past the target key.
  DataBlockIter* NewDataIterator(const Comparator* raw_ucmp,
                                 SequenceNumber global_seqno,
                                 DataBlockIter* iter = nullptr,
                                 Statistics* stats = nullptr,
                                 bool block_contents_pinned = false,
                                 bool user_defined_timestamps_persisted = true);
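
  // Example (illustrative sketch): scanning a parsed data block. `contents` is
  // assumed to be a BlockContents holding the uncompressed block bytes and
  // `ucmp` the table's raw user comparator.
  //
  //   Block block(std::move(contents), /*read_amp_bytes_per_bit=*/0);
  //   std::unique_ptr<DataBlockIter> it(
  //       block.NewDataIterator(ucmp, kDisableGlobalSequenceNumber));
  //   for (it->SeekToFirst(); it->Valid(); it->Next()) {
  //     // it->key() is an internal key; it->value() is the user value.
  //   }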

  // Returns a MetaBlockIter for iterating over blocks containing metadata
  // (like Properties blocks). Unlike data blocks, the keys for these blocks
  // do not contain sequence numbers, do not use a user-defined comparator, and
  // do not track read amplification/statistics. Additionally, MetaBlocks will
  // not assert if the block is formatted improperly.
  //
  // If `block_contents_pinned` is true, the caller will guarantee that when
  // the cleanup functions are transferred from the iterator to other
  // classes, e.g. PinnableSlice, the pointer to the bytes will still be
  // valid. Either the iterator holds a cache handle or ownership of some
  // resource and releases them in a release function, or the caller is sure
  // that the data will not go away (for example, it's from an mmapped file
  // which will not be closed).
  MetaBlockIter* NewMetaIterator(bool block_contents_pinned = false);
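
  // Example (illustrative sketch): walking a metaindex block, where each key
  // names a meta block (e.g. "rocksdb.properties") and each value is that
  // block's serialized BlockHandle.
  //
  //   std::unique_ptr<MetaBlockIter> meta_it(
  //       metaindex_block.NewMetaIterator());
  //   for (meta_it->SeekToFirst(); meta_it->Valid(); meta_it->Next()) {
  //     // meta_it->key() / meta_it->value() describe one meta block.
  //   }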

  // raw_ucmp is a raw (i.e., not wrapped by `UserComparatorWrapper`) user key
  // comparator.
  //
  // key_includes_seq, default true, means that the keys are in internal key
  // format.
  // value_is_full, default true, means that no delta encoding is
  // applied to values.
  //
  // If `prefix_index` is not nullptr, this block will do a hash lookup for the
  // key prefix. If total_order_seek is true, prefix_index_ is ignored.
  //
  // `have_first_key` controls whether IndexValue will contain
  // first_internal_key. It affects the data serialization format, so the same
  // value of have_first_key must be used when writing and reading the index.
  // It is determined by the IndexType property of the table.
  //
  // `user_defined_timestamps_persisted` controls whether a min timestamp is
  // padded while the key is being parsed from the block.
  IndexBlockIter* NewIndexIterator(
      const Comparator* raw_ucmp, SequenceNumber global_seqno,
      IndexBlockIter* iter, Statistics* stats, bool total_order_seek,
      bool have_first_key, bool key_includes_seq, bool value_is_full,
      bool block_contents_pinned = false,
      bool user_defined_timestamps_persisted = true,
      BlockPrefixIndex* prefix_index = nullptr);
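
  // Example (illustrative sketch): iterating an index block whose entries
  // carry first keys. The flag values below are assumptions for the example
  // and must match how the index block was written.
  //
  //   std::unique_ptr<IndexBlockIter> idx_it(index_block.NewIndexIterator(
  //       ucmp, kDisableGlobalSequenceNumber, /*iter=*/nullptr,
  //       /*stats=*/nullptr, /*total_order_seek=*/true,
  //       /*have_first_key=*/true, /*key_includes_seq=*/true,
  //       /*value_is_full=*/true));
  //   for (idx_it->SeekToFirst(); idx_it->Valid(); idx_it->Next()) {
  //     // idx_it->value() is an IndexValue holding the child block's handle
  //     // and, because have_first_key is true, its first_internal_key.
  //   }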

  // Report an approximation of how much memory has been used.
  size_t ApproximateMemoryUsage() const;

  // For TypedCacheInterface
  const Slice& ContentSlice() const { return contents_.data; }

  // Initializes per key-value checksum protection.
  // After this method is called, each DataBlockIter returned
  // by NewDataIterator will verify per key-value checksum for any key it
  // reads.
  void InitializeDataBlockProtectionInfo(uint8_t protection_bytes_per_key,
                                         const Comparator* raw_ucmp);
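
  // Example (illustrative sketch): enabling per key-value checksum
  // verification on a data block before handing out iterators; the 8-byte
  // protection size is an assumption for the example.
  //
  //   block.InitializeDataBlockProtectionInfo(/*protection_bytes_per_key=*/8,
  //                                           ucmp);
  //   std::unique_ptr<DataBlockIter> it(
  //       block.NewDataIterator(ucmp, kDisableGlobalSequenceNumber));
  //   // Keys read through `it` are now verified against the per key-value
  //   // checksums computed during initialization.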

  // Initializes per key-value checksum protection.
  // After this method is called, each IndexBlockIter returned
  // by NewIndexIterator will verify per key-value checksum for any key it
  // reads. value_is_full and index_has_first_key are needed to be able to
  // parse the index block content and construct checksums.
  void InitializeIndexBlockProtectionInfo(uint8_t protection_bytes_per_key,
                                          const Comparator* raw_ucmp,
                                          bool value_is_full,
                                          bool index_has_first_key);

  // Initializes per key-value checksum protection.
  // After this method is called, each MetaBlockIter returned
  // by NewMetaIterator will verify per key-value checksum for any key it
  // reads.
  void InitializeMetaIndexBlockProtectionInfo(uint8_t protection_bytes_per_key);

  static void GenerateKVChecksum(char* checksum_ptr, uint8_t checksum_len,
                                 const Slice& key, const Slice& value) {
    ProtectionInfo64().ProtectKV(key, value).Encode(checksum_len, checksum_ptr);
  }
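
  // Example (illustrative sketch): computing the checksum that protects a
  // single key-value pair with an 8-byte-per-key configuration.
  //
  //   char kv_crc[8];
  //   Block::GenerateKVChecksum(kv_crc, /*checksum_len=*/sizeof(kv_crc),
  //                             key, value);
  //   // kv_crc now holds the encoded checksum for (key, value) and can be
  //   // compared against the stored per key-value checksum.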

  const char* TEST_GetKVChecksum() const { return kv_checksum_; }

 private:
  BlockContents contents_;
  const char* data_;         // contents_.data.data()
  size_t size_;              // contents_.data.size()
  uint32_t restart_offset_;  // Offset in data_ of restart array
  uint32_t num_restarts_;
  std::unique_ptr<BlockReadAmpBitmap> read_amp_bitmap_;
  char* kv_checksum_{nullptr};
  uint32_t checksum_size_{0};
  // Used by block iterators to calculate current key index within a block
  uint32_t block_restart_interval_{0};
  uint8_t protection_bytes_per_key_{0};
  DataBlockHashIndex data_block_hash_index_;
};

// A `BlockIter` iterates over the entries in a `Block`'s data buffer. The
// format of this data buffer is an uncompressed, sorted sequence of key-value
// pairs (see `Block` API for more details).
//
// Notably, the keys may either be in internal key format or user key format.
// Subclasses are responsible for configuring the key format.
//
// `BlockIter` intends to provide final overrides for all of
// `InternalIteratorBase` functions that can move the iterator. It does
// this to guarantee `UpdateKey()` is called exactly once after each key
// movement potentially visible to users. In this step, the key is prepared
// (e.g., serialized if global seqno is in effect) so it can be returned
// immediately when the user asks for it via calling `key() const`.
//
// For its subclasses, it provides protected variants of the above-mentioned
// final-overridden methods. They are named with the "Impl" suffix, e.g.,
// `Seek()` logic would be implemented by subclasses in `SeekImpl()`. These
// "Impl" functions are responsible for positioning `raw_key_` but not
// invoking `UpdateKey()`.
//
// Per key-value checksum is enabled if the relevant states are passed in
// during `InitializeBase()`. The checksum verification is done in each call
// to `UpdateKey()` for the current key. Each subclass is responsible for
// keeping track of `cur_entry_idx_`, the index of the current key within the
// block. `BlockIter` uses this index to look up the corresponding checksum
// for the current key. Additional checksum verification may be done in
// subclasses if they read keys other than the key being processed in
// `UpdateKey()`.
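//
// The final/Impl split above is the non-virtual interface (template method)
// pattern. A minimal standalone sketch of the same shape (illustrative only,
// not the actual class hierarchy or signatures):
//
//   class IterSketch {
//    public:
//     virtual ~IterSketch() = default;
//     void Seek(int target) {  // public entry point, never overridden
//       SeekImpl(target);      // subclass positions the raw key
//       UpdateKey();           // base class finalizes the exposed key once
//     }
//
//    protected:
//     virtual void SeekImpl(int target) = 0;  // no UpdateKey() in here
//     void UpdateKey() { /* prepare key, verify per-KV checksum, ... */ }
//   };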
template <class TValue>
class BlockIter : public InternalIteratorBase<TValue> {
 public:
  // Makes Valid() return false, status() return `s`, and Seek()/Prev()/etc do
  // nothing. Calls cleanup functions.
  virtual void Invalidate(const Status& s) {
    // Assert that the BlockIter is never deleted while Pinning is Enabled.
    assert(!pinned_iters_mgr_ || !pinned_iters_mgr_->PinningEnabled());
    data_ = nullptr;
    current_ = restarts_;
    status_ = s;

    // Call cleanup callbacks.
    Cleanable::Reset();
  }

  bool Valid() const override {
    // When status_ is not ok, iter should be invalid.
    assert(status_.ok() || current_ >= restarts_);
    return current_ < restarts_;
  }

  void SeekToFirst() override final {
#ifndef NDEBUG
    if (TEST_Corrupt_Callback("BlockIter::SeekToFirst")) return;
#endif
    SeekToFirstImpl();
    UpdateKey();
  }

  void SeekToLast() override final {
    SeekToLastImpl();
    UpdateKey();
  }

  void Seek(const Slice& target) override final {
    SeekImpl(target);
    UpdateKey();
  }

  void SeekForPrev(const Slice& target) override final {
    SeekForPrevImpl(target);
    UpdateKey();
  }

  void Next() override final {
    NextImpl();
    UpdateKey();
  }

  bool NextAndGetResult(IterateResult* result) override final {
    // This does not need to call `UpdateKey()` as the parent class only has
    // access to the `UpdateKey()`-invoking functions.
    return InternalIteratorBase<TValue>::NextAndGetResult(result);
  }

  void Prev() override final {
    PrevImpl();
    UpdateKey();
  }

  Status status() const override { return status_; }

  Slice key() const override {
    assert(Valid());
    return key_;
  }

#ifndef NDEBUG
  ~BlockIter() override {
    // Assert that the BlockIter is never deleted while Pinning is Enabled.
    assert(!pinned_iters_mgr_ ||
           (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled()));
    status_.PermitUncheckedError();
  }

  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
    pinned_iters_mgr_ = pinned_iters_mgr;
  }

  PinnedIteratorsManager* pinned_iters_mgr_ = nullptr;

  bool TEST_Corrupt_Callback(const std::string& sync_point) {
    bool corrupt = false;
    TEST_SYNC_POINT_CALLBACK(sync_point, static_cast<void*>(&corrupt));

    if (corrupt) {
      CorruptionError();
    }
    return corrupt;
  }
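
  // In debug builds, tests can force the corruption path through the sync
  // point fired above. A sketch of how a test might wire that up (assuming
  // the usual SyncPoint helpers from test_util/sync_point.h):
  //
  //   SyncPoint::GetInstance()->SetCallBack(
  //       "BlockIter::SeekToFirst",
  //       [](void* arg) { *static_cast<bool*>(arg) = true; });
  //   SyncPoint::GetInstance()->EnableProcessing();
  //   // ... SeekToFirst() on a BlockIter now takes the corruption path ...
  //   SyncPoint::GetInstance()->DisableProcessing();
  //   SyncPoint::GetInstance()->ClearAllCallBacks();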
#endif

  bool IsKeyPinned() const override {
    return block_contents_pinned_ && key_pinned_;
  }

  bool IsValuePinned() const override { return block_contents_pinned_; }

  size_t TEST_CurrentEntrySize() { return NextEntryOffset() - current_; }

  uint32_t ValueOffset() const {
    return static_cast<uint32_t>(value_.data() - data_);
  }

  void SetCacheHandle(Cache::Handle* handle) { cache_handle_ = handle; }

  Cache::Handle* cache_handle() { return cache_handle_; }

 protected:
  std::unique_ptr<InternalKeyComparator> icmp_;
  const char* data_;       // underlying block contents
  uint32_t num_restarts_;  // Number of uint32_t entries in restart array

  // Index of restart block in which current_ or current_-1 falls
  uint32_t restart_index_;
  uint32_t restarts_;  // Offset of restart array (list of fixed32)
  // current_ is offset in data_ of current entry. >= restarts_ if !Valid
  uint32_t current_;
  // Raw key from block.
  IterKey raw_key_;
  // Buffer for key data when global seqno assignment is enabled.
  IterKey key_buf_;
  Slice value_;
  Status status_;
  // Key to be exposed to users.
  Slice key_;
  SequenceNumber global_seqno_;
  // Size of the user-defined timestamp.
  size_t ts_sz_ = 0;
  // True if the user-defined timestamp is enabled but not persisted. In that
  // case, a min timestamp will be padded to the key during key parsing where
  // it applies, such as when parsing keys from a data block or an index
  // block, or when parsing the first internal key from an IndexValue entry.
  // Min timestamp padding is different for when `raw_key_` is a user key vs
  // an internal key.
  //
  // This only applies to data blocks and index blocks, including the index
  // block for data blocks, the index block for partitioned filter blocks,
  // and the index block for partitioned index blocks. In summary, it only
  // applies to blocks whose keys are real user keys or internal keys created
  // from user keys.
  bool pad_min_timestamp_;

  // Per key-value checksum related states
  const char* kv_checksum_;
  int32_t cur_entry_idx_;
  uint32_t block_restart_interval_;
  uint8_t protection_bytes_per_key_;

  bool key_pinned_;
  // Whether the block data is guaranteed to outlive this iterator, and
  // as long as the cleanup functions are transferred to another class,
  // e.g. PinnableSlice, the pointer to the bytes will still be valid.
  bool block_contents_pinned_;

  virtual void SeekToFirstImpl() = 0;
  virtual void SeekToLastImpl() = 0;
  virtual void SeekImpl(const Slice& target) = 0;
  virtual void SeekForPrevImpl(const Slice& target) = 0;
  virtual void NextImpl() = 0;
  virtual void PrevImpl() = 0;

  // Returns the restart interval of this block.
  // Returns 0 if num_restarts_ <= 1 or if the BlockIter is not initialized.
  virtual uint32_t GetRestartInterval() {
    if (num_restarts_ <= 1 || data_ == nullptr) {
      return 0;
    }
    SeekToFirstImpl();
    uint32_t end_index = GetRestartPoint(1);
    uint32_t count = 1;
    while (NextEntryOffset() < end_index && status_.ok()) {
      assert(Valid());
      NextImpl();
      ++count;
    }
    return count;
  }

  // Returns the number of keys in this block.
  virtual uint32_t NumberOfKeys(uint32_t block_restart_interval) {
    if (num_restarts_ == 0 || data_ == nullptr) {
      return 0;
    }
    uint32_t count = (num_restarts_ - 1) * block_restart_interval;
    // Add number of keys from the last restart interval
    SeekToRestartPoint(num_restarts_ - 1);
    while (NextEntryOffset() < restarts_ && status_.ok()) {
      NextImpl();
      ++count;
    }
    return count;
  }
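
  // Worked example (illustrative numbers): with num_restarts_ == 4 and a
  // restart interval of 16, the first three restart intervals are assumed
  // full, so NumberOfKeys() starts from (4 - 1) * 16 = 48 and then walks the
  // last interval; if that interval holds 5 keys, the block reports
  // 48 + 5 = 53 keys.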

  // Stores whether the current key has shared bytes with the prev key in
  // *is_shared.
  // Sets raw_key_, value_ to the current parsed key and value.
  // Sets restart_index_ to point to the restart interval that contains
  // the current key.
  template <typename DecodeEntryFunc>
  inline bool ParseNextKey(bool* is_shared);

  // protection_bytes_per_key, kv_checksum, and block_restart_interval
  // are needed only for per kv checksum verification.
  void InitializeBase(const Comparator* raw_ucmp, const char* data,
                      uint32_t restarts, uint32_t num_restarts,
                      SequenceNumber global_seqno, bool block_contents_pinned,
                      bool user_defined_timestamp_persisted,
                      uint8_t protection_bytes_per_key,
                      const char* kv_checksum,
                      uint32_t block_restart_interval) {
    assert(data_ == nullptr);  // Ensure it is called only once
    assert(num_restarts > 0);  // Ensure the param is valid

    icmp_ = std::make_unique<InternalKeyComparator>(raw_ucmp);
    data_ = data;
    restarts_ = restarts;
    num_restarts_ = num_restarts;
    current_ = restarts_;
    restart_index_ = num_restarts_;
    global_seqno_ = global_seqno;
    if (raw_ucmp != nullptr) {
      ts_sz_ = raw_ucmp->timestamp_size();
    }
    pad_min_timestamp_ = ts_sz_ > 0 && !user_defined_timestamp_persisted;
    block_contents_pinned_ = block_contents_pinned;
    cache_handle_ = nullptr;
    cur_entry_idx_ = -1;
    protection_bytes_per_key_ = protection_bytes_per_key;
    kv_checksum_ = kv_checksum;
    block_restart_interval_ = block_restart_interval;
    // Checksum related states are either all 0/nullptr or all non-zero.
    // One exception is when num_restarts == 0, block_restart_interval can be 0
    // since we are not able to compute it.
    assert((protection_bytes_per_key == 0 && kv_checksum == nullptr) ||
           (protection_bytes_per_key > 0 && kv_checksum != nullptr &&
            (block_restart_interval > 0 || num_restarts == 1)));
  }

  void CorruptionError(const std::string& error_msg = "bad entry in block") {
    current_ = restarts_;
    restart_index_ = num_restarts_;
    status_ = Status::Corruption(error_msg);
    raw_key_.Clear();
    value_.clear();
  }

  void PerKVChecksumCorruptionError() {
    std::string error_msg{
        "Corrupted block entry: per key-value checksum verification "
        "failed."};
    error_msg.append(" Offset: " + std::to_string(current_) + ".");
    error_msg.append(" Entry index: " + std::to_string(cur_entry_idx_) + ".");
    CorruptionError(error_msg);
  }

  void UpdateRawKeyAndMaybePadMinTimestamp(const Slice& key) {
    if (pad_min_timestamp_) {
      std::string buf;
      if (raw_key_.IsUserKey()) {
        AppendKeyWithMinTimestamp(&buf, key, ts_sz_);
      } else {
        PadInternalKeyWithMinTimestamp(&buf, key, ts_sz_);
      }
      raw_key_.SetKey(buf, true /* copy */);
    } else {
      raw_key_.SetKey(key, false /* copy */);
    }
  }
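
  // Illustrative sketch of what the padding above amounts to for a plain
  // user key, assuming the minimum timestamp is encoded as ts_sz_ zero bytes
  // (the real work is done by the AppendKeyWithMinTimestamp /
  // PadInternalKeyWithMinTimestamp helpers used above):
  //
  //   #include <string>
  //
  //   std::string PadUserKeyWithMinTs(const std::string& user_key,
  //                                   size_t ts_sz) {
  //     std::string padded = user_key;
  //     padded.append(ts_sz, '\0');  // min-timestamp placeholder
  //     return padded;
  //   }
  //
  //   // PadUserKeyWithMinTs("foo", 8) -> "foo" followed by 8 zero bytes, so
  //   // keys parsed from a block written without timestamps compare
  //   // consistently against full keys that carry one.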

  // Must be called every time a key is found that needs to be returned to
  // user, and may be called when no key is found (as a no-op). Updates
  // `key_`, `key_buf_`, and `key_pinned_` with info about the found key.
  // Per key-value checksum verification is done if available for the key to
  // be returned. Iterator is invalidated with corruption status if checksum
  // verification fails.
  void UpdateKey() {
    key_buf_.Clear();
    if (!Valid()) {
      return;
    }
    if (raw_key_.IsUserKey()) {
      assert(global_seqno_ == kDisableGlobalSequenceNumber);
      key_ = raw_key_.GetUserKey();
      key_pinned_ = raw_key_.IsKeyPinned();
    } else if (global_seqno_ == kDisableGlobalSequenceNumber) {
      key_ = raw_key_.GetInternalKey();
      key_pinned_ = raw_key_.IsKeyPinned();
    } else {
      key_buf_.SetInternalKey(raw_key_.GetUserKey(), global_seqno_,
                              ExtractValueType(raw_key_.GetInternalKey()));
      key_ = key_buf_.GetInternalKey();
      key_pinned_ = false;
    }
    TEST_SYNC_POINT_CALLBACK("BlockIter::UpdateKey::value",
                             (void*)value_.data());
    TEST_SYNC_POINT_CALLBACK("Block::VerifyChecksum::checksum_len",
                             &protection_bytes_per_key_);
    if (protection_bytes_per_key_ > 0) {
      if (!ProtectionInfo64()
               .ProtectKV(raw_key_.GetKey(), value_)
               .Verify(
                   protection_bytes_per_key_,
                   kv_checksum_ + protection_bytes_per_key_ * cur_entry_idx_)) {
        PerKVChecksumCorruptionError();
      }
    }
  }
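
  // The checksum lookup above is pure pointer arithmetic: with
  // protection_bytes_per_key_ == 8 and cur_entry_idx_ == 3, the slot checked
  // is kv_checksum_ + 8 * 3 = kv_checksum_ + 24, i.e. bytes [24, 32) of the
  // per-block checksum array built when the Block's protection info is
  // initialized.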

  // Returns the result of `Comparator::Compare()`, where the appropriate
  // comparator is used for the block contents, the LHS argument is the
  // current key with global seqno applied, and the RHS argument is `other`.
  int CompareCurrentKey(const Slice& other) {
    if (raw_key_.IsUserKey()) {
      assert(global_seqno_ == kDisableGlobalSequenceNumber);
      return icmp_->user_comparator()->Compare(raw_key_.GetUserKey(), other);
    } else if (global_seqno_ == kDisableGlobalSequenceNumber) {
      return icmp_->Compare(raw_key_.GetInternalKey(), other);
    }
    return icmp_->Compare(raw_key_.GetInternalKey(), global_seqno_, other,
                          kDisableGlobalSequenceNumber);
  }

 private:
  // Store the cache handle, if the block is cached. We need this since the
  // only other place the handle is stored is as an argument to the Cleanable
  // function callback, which is hard to retrieve. When multiple value
  // PinnableSlices reference the block, they need the cache handle in order
  // to bump up the ref count.
  Cache::Handle* cache_handle_;

 public:
  // Return the offset in data_ just past the end of the current entry.
  inline uint32_t NextEntryOffset() const {
    // NOTE: We don't support blocks bigger than 2GB
    return static_cast<uint32_t>((value_.data() + value_.size()) - data_);
  }

  uint32_t GetRestartPoint(uint32_t index) const {
    assert(index < num_restarts_);
    return DecodeFixed32(data_ + restarts_ + index * sizeof(uint32_t));
  }
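
  // The restart array is just num_restarts_ little-endian uint32 offsets
  // starting at data_ + restarts_. A standalone sketch of the decode above
  // (DecodeFixed32 is the library's fixed-width little-endian reader; this
  // toy version assumes a little-endian host):
  //
  //   #include <cstdint>
  //   #include <cstring>
  //
  //   uint32_t ReadLE32(const char* p) {
  //     uint32_t v;
  //     std::memcpy(&v, p, sizeof(v));
  //     return v;
  //   }
  //
  //   // Offset of the i-th restart point within a block:
  //   //   ReadLE32(block_data + restart_array_offset + i * sizeof(uint32_t))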

  void SeekToRestartPoint(uint32_t index) {
    raw_key_.Clear();
    restart_index_ = index;
    // current_ will be fixed by ParseNextKey();

    // ParseNextKey() starts at the end of value_, so set value_ accordingly
    uint32_t offset = GetRestartPoint(index);
    value_ = Slice(data_ + offset, 0);
  }

 protected:
  template <typename DecodeKeyFunc>
  inline bool BinarySeek(const Slice& target, uint32_t* index,
                         bool* is_index_key_result);

  // Find the first key in restart interval `index` that is >= `target`.
  // If there is no such key, iterator is positioned at the first key in
  // restart interval `index + 1`.
  // If is_index_key_result is true, it positions the iterator at the first
  // key in this restart interval.
  // Per key-value checksum verification is done for all keys scanned
  // up to but not including the last key (the key that current_ points to
  // when this function returns). This key's checksum is verified in
  // UpdateKey().
  void FindKeyAfterBinarySeek(const Slice& target, uint32_t index,
                              bool is_index_key_result);
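
  // Conceptually, Seek() is a two-phase search: BinarySeek() binary-searches
  // the restart points for the last restart key <= target, then
  // FindKeyAfterBinarySeek() scans forward within that restart interval. A
  // standalone sketch of the first phase over plain strings (illustrative
  // only; the real code decodes restart keys lazily via DecodeKeyFunc):
  //
  //   #include <cstdint>
  //   #include <string>
  //   #include <vector>
  //
  //   // Precondition: restart_keys is non-empty and sorted.
  //   // Returns the index of the last restart key <= target, or 0 if target
  //   // sorts before every restart key.
  //   uint32_t BinarySeekSketch(const std::vector<std::string>& restart_keys,
  //                             const std::string& target) {
  //     uint32_t left = 0;
  //     uint32_t right = static_cast<uint32_t>(restart_keys.size()) - 1;
  //     while (left < right) {
  //       uint32_t mid = left + (right - left + 1) / 2;  // round up
  //       if (restart_keys[mid] <= target) {
  //         left = mid;  // target can only be in interval mid or later
  //       } else {
  //         right = mid - 1;
  //       }
  //     }
  //     return left;
  //   }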
};

class DataBlockIter final : public BlockIter<Slice> {
 public:
  DataBlockIter()
      : BlockIter(), read_amp_bitmap_(nullptr), last_bitmap_offset_(0) {}
  void Initialize(const Comparator* raw_ucmp, const char* data,
                  uint32_t restarts, uint32_t num_restarts,
                  SequenceNumber global_seqno,
                  BlockReadAmpBitmap* read_amp_bitmap,
                  bool block_contents_pinned,
                  bool user_defined_timestamps_persisted,
                  DataBlockHashIndex* data_block_hash_index,
                  uint8_t protection_bytes_per_key, const char* kv_checksum,
                  uint32_t block_restart_interval) {
    InitializeBase(raw_ucmp, data, restarts, num_restarts, global_seqno,
                   block_contents_pinned, user_defined_timestamps_persisted,
                   protection_bytes_per_key, kv_checksum,
                   block_restart_interval);
    raw_key_.SetIsUserKey(false);
    read_amp_bitmap_ = read_amp_bitmap;
    last_bitmap_offset_ = current_ + 1;
    data_block_hash_index_ = data_block_hash_index;
  }

  Slice value() const override {
    assert(Valid());
    if (read_amp_bitmap_ && current_ < restarts_ &&
        current_ != last_bitmap_offset_) {
      read_amp_bitmap_->Mark(current_ /* current entry offset */,
                             NextEntryOffset() - 1);
      last_bitmap_offset_ = current_;
    }
    return value_;
  }

  // Returns whether `target` may exist.
  inline bool SeekForGet(const Slice& target) {
#ifndef NDEBUG
    if (TEST_Corrupt_Callback("DataBlockIter::SeekForGet")) return true;
#endif
    if (!data_block_hash_index_) {
      SeekImpl(target);
      UpdateKey();
      return true;
    }
    bool res = SeekForGetImpl(target);
    UpdateKey();
    return res;
  }
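
  // A sketch of the intended point-lookup call pattern (the surrounding
  // reader plumbing is hypothetical; only SeekForGet()/Valid()/key()/value()
  // are real members):
  //
  //   DataBlockIter iter;
  //   // ... iter.Initialize(...) with a block that carries a hash index ...
  //   if (!iter.SeekForGet(lookup_key)) {
  //     // The hash index proves `lookup_key` is not in this block, so the
  //     // caller can move on without a full seek.
  //   } else if (iter.Valid()) {
  //     // "May exist": compare iter.key() against lookup_key and, on a
  //     // match, read iter.value().
  //   }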

  void Invalidate(const Status& s) override {
    BlockIter::Invalidate(s);
    // Clear prev entries cache.
    prev_entries_keys_buff_.clear();
    prev_entries_.clear();
    prev_entries_idx_ = -1;
  }

 protected:
  friend Block;
  inline bool ParseNextDataKey(bool* is_shared);
  void SeekToFirstImpl() override;
  void SeekToLastImpl() override;
  void SeekImpl(const Slice& target) override;
  void SeekForPrevImpl(const Slice& target) override;
  void NextImpl() override;
  void PrevImpl() override;

 private:
  // read-amp bitmap
  BlockReadAmpBitmap* read_amp_bitmap_;
  // last `current_` value we report to the read-amp bitmap
  mutable uint32_t last_bitmap_offset_;
  struct CachedPrevEntry {
    explicit CachedPrevEntry(uint32_t _offset, const char* _key_ptr,
                             size_t _key_offset, size_t _key_size,
                             Slice _value)
        : offset(_offset),
          key_ptr(_key_ptr),
          key_offset(_key_offset),
          key_size(_key_size),
          value(_value) {}

    // offset of entry in block
    uint32_t offset;
    // Pointer to key data in block (nullptr if key is delta-encoded)
    const char* key_ptr;
    // offset of key in prev_entries_keys_buff_ (0 if key_ptr is not nullptr)
    size_t key_offset;
    // size of key
    size_t key_size;
    // value slice pointing to data in block
    Slice value;
  };
  std::string prev_entries_keys_buff_;
  std::vector<CachedPrevEntry> prev_entries_;
  int32_t prev_entries_idx_ = -1;

  DataBlockHashIndex* data_block_hash_index_;

  bool SeekForGetImpl(const Slice& target);
};

// Iterator over MetaBlocks. MetaBlocks are similar to Data Blocks and
// are used to store Properties associated with the table.
// Meta blocks always store user keys (no sequence number) and always
// use the BytewiseComparator. Additionally, MetaBlock accesses are
// not recorded in the Statistics or for Read-Amplification.
class MetaBlockIter final : public BlockIter<Slice> {
 public:
  MetaBlockIter() : BlockIter() { raw_key_.SetIsUserKey(true); }
  void Initialize(const char* data, uint32_t restarts, uint32_t num_restarts,
                  bool block_contents_pinned,
                  uint8_t protection_bytes_per_key, const char* kv_checksum,
                  uint32_t block_restart_interval) {
    // Initializes the iterator with a BytewiseComparator and
    // the raw key being a user key.
    InitializeBase(BytewiseComparator(), data, restarts, num_restarts,
                   kDisableGlobalSequenceNumber, block_contents_pinned,
                   /* user_defined_timestamps_persisted */ true,
                   protection_bytes_per_key, kv_checksum,
                   block_restart_interval);
    raw_key_.SetIsUserKey(true);
  }

  Slice value() const override {
    assert(Valid());
    return value_;
  }

 protected:
  friend Block;
  void SeekToFirstImpl() override;
  void SeekToLastImpl() override;
  void SeekImpl(const Slice& target) override;
  void SeekForPrevImpl(const Slice& target) override;
  void NextImpl() override;
  void PrevImpl() override;
  // Meta index block's restart interval is always 1. See
  // MetaIndexBuilder::MetaIndexBuilder() for hard-coded restart interval.
  uint32_t GetRestartInterval() override { return 1; }
  uint32_t NumberOfKeys(uint32_t) override { return num_restarts_; }
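
  // With a restart interval of 1, every entry starts its own restart
  // interval, so there is no prefix delta-encoding to walk and the key count
  // equals num_restarts_; e.g. a meta block holding 7 entries has exactly 7
  // restart points, which is why both overrides above are O(1).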
};

class IndexBlockIter final : public BlockIter<IndexValue> {
 public:
  IndexBlockIter() : BlockIter(), prefix_index_(nullptr) {}

  // key_includes_seq, default true, means that the keys are in internal key
  // format.
  // value_is_full, default true, means that no delta encoding is
  // applied to values.
  void Initialize(const Comparator* raw_ucmp, const char* data,
                  uint32_t restarts, uint32_t num_restarts,
                  SequenceNumber global_seqno, BlockPrefixIndex* prefix_index,
                  bool have_first_key, bool key_includes_seq,
                  bool value_is_full, bool block_contents_pinned,
                  bool user_defined_timestamps_persisted,
                  uint8_t protection_bytes_per_key, const char* kv_checksum,
                  uint32_t block_restart_interval) {
    InitializeBase(raw_ucmp, data, restarts, num_restarts,
                   kDisableGlobalSequenceNumber, block_contents_pinned,
                   user_defined_timestamps_persisted,
                   protection_bytes_per_key, kv_checksum,
                   block_restart_interval);
    raw_key_.SetIsUserKey(!key_includes_seq);
    prefix_index_ = prefix_index;
    value_delta_encoded_ = !value_is_full;
Add an option to put first key of each sst block in the index (#5289)
Summary:
The first key is used to defer reading the data block until this file gets to the top of merging iterator's heap. For short range scans, most files never make it to the top of the heap, so this change can reduce read amplification by a lot sometimes.
Consider the following workload. There are a few data streams (we'll be calling them "logs"), each stream consisting of a sequence of blobs (we'll be calling them "records"). Each record is identified by log ID and a sequence number within the log. RocksDB key is concatenation of log ID and sequence number (big endian). Reads are mostly relatively short range scans, each within a single log. Writes are mostly sequential for each log, but writes to different logs are randomly interleaved. Compactions are disabled; instead, when we accumulate a few tens of sst files, we create a new column family and start writing to it.
So, a typical sst file consists of a few ranges of blocks, each range corresponding to one log ID (we use FlushBlockPolicy to cut blocks at log boundaries). A typical read would go like this. First, iterator Seek() reads one block from each sst file. Then a series of Next()s move through one sst file (since writes to each log are mostly sequential) until the subiterator reaches the end of this log in this sst file; then Next() switches to the next sst file and reads sequentially from that, and so on. Often a range scan will only return records from a small number of blocks in small number of sst files; in this case, the cost of initial Seek() reading one block from each file may be bigger than the cost of reading the actually useful blocks.
Neither iterate_upper_bound nor bloom filters can prevent reading one block from each file in Seek(). But this PR can: if the index contains first key from each block, we don't have to read the block until this block actually makes it to the top of merging iterator's heap, so for short range scans we won't read any blocks from most of the sst files.
This PR does the deferred block loading inside value() call. This is not ideal: there's no good way to report an IO error from inside value(). As discussed with siying offline, it would probably be better to change InternalIterator's interface to explicitly fetch deferred value and get status. I'll do it in a separate PR.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5289
Differential Revision: D15256423
Pulled By: al13n321
fbshipit-source-id: 750e4c39ce88e8d41662f701cf6275d9388ba46a
2019-06-25 03:50:35 +00:00
|
|
|
have_first_key_ = have_first_key;
|
|
|
|
if (have_first_key_ && global_seqno != kDisableGlobalSequenceNumber) {
|
|
|
|
global_seqno_state_.reset(new GlobalSeqnoState(global_seqno));
|
|
|
|
} else {
|
|
|
|
global_seqno_state_.reset();
|
|
|
|
}
|
2018-08-09 23:49:45 +00:00
|
|
|
}
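
  // Illustrative summary of the flags above, assuming only the behavior shown
  // in the body of Initialize() (a sketch, not upstream documentation):
  //   * value_is_full == true  -> value_delta_encoded_ is false, so value()
  //     can decode value_ directly (when no seqno overwriting or timestamp
  //     padding is in effect) instead of relying on decoded_value_.
  //   * have_first_key == false, or
  //     global_seqno == kDisableGlobalSequenceNumber
  //                            -> global_seqno_state_ stays null and no
  //     sequence number overwriting happens.
  //   * prefix_index == nullptr -> prefix_index_ is null; no prefix index is
  //     available to this iterator.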

  Slice user_key() const override {
    assert(Valid());
    return raw_key_.GetUserKey();
  }

  IndexValue value() const override {
    assert(Valid());
    if (value_delta_encoded_ || global_seqno_state_ != nullptr ||
        pad_min_timestamp_) {
      return decoded_value_;
    } else {
      IndexValue entry;
      Slice v = value_;
      Status decode_s __attribute__((__unused__)) =
          entry.DecodeFrom(&v, have_first_key_, nullptr);
      assert(decode_s.ok());
      return entry;
    }
  }

  Slice raw_value() const {
    assert(Valid());
    return value_;
  }

  bool IsValuePinned() const override {
    return global_seqno_state_ != nullptr ? false : BlockIter::IsValuePinned();
  }

 protected:
  friend Block;
  // IndexBlockIter follows a different contract for prefix iterators than
  // data iterators do.
  // If the prefix of the seek key `target` exists in the file, it must
  // return the same result as a total order seek.
  // If the prefix of `target` doesn't exist in the file, it can either
  // return the result of a total order seek, or set both Valid() = false
  // and status() = NotFound().
  void SeekImpl(const Slice& target) override;
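
  // Illustrative caller-side handling of the contract above (a sketch, not a
  // quote of the real callers): after a prefix-based seek on the index
  // iterator, a missing prefix may surface as an invalidated iterator with a
  // NotFound status rather than as a positioned iterator:
  //
  //   index_iter->Seek(seek_key);
  //   if (!index_iter->Valid() && index_iter->status().IsNotFound()) {
  //     // The prefix of seek_key does not exist in this file; the caller may
  //     // treat the file as containing no matching keys.
  //   }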

  void SeekForPrevImpl(const Slice&) override {
    assert(false);
    current_ = restarts_;
    restart_index_ = num_restarts_;
    status_ = Status::InvalidArgument(
        "RocksDB internal error: should never call SeekForPrev() on index "
        "blocks");
    raw_key_.Clear();
    value_.clear();
  }

  void PrevImpl() override;
  void NextImpl() override;
  void SeekToFirstImpl() override;
  void SeekToLastImpl() override;

 private:
  bool value_delta_encoded_;
  bool have_first_key_;  // value includes first_internal_key
  BlockPrefixIndex* prefix_index_;
  // Whether the value is delta encoded. In that case the value is assumed to
  // be a BlockHandle. The first value in each restart interval is the full
  // encoded BlockHandle; the rest store only the size part of the BlockHandle.
  // The offset of a delta encoded BlockHandle is computed by adding the sizes
  // of the previous delta encoded values in the same restart interval to the
  // offset of the first value in that restart interval.
  IndexValue decoded_value_;
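
  // Worked example of the encoding described above (illustrative numbers; the
  // real decoder may also account for per-block trailer bytes, which this
  // sketch omits):
  //   restart entry    : full BlockHandle {offset = 1000, size = 200}
  //   next entry       : stores size only {size = 300}
  //                      -> offset = 1000 + 200       = 1200
  //   entry after that : stores size only {size = 150}
  //                      -> offset = 1000 + 200 + 300 = 1500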

  // When sequence number overwriting is enabled, this struct contains the
  // seqno to overwrite with, and current first_internal_key with overwritten
  // seqno. This is rarely used, so we put it behind a pointer and only
  // allocate when needed.
  struct GlobalSeqnoState {
    // First internal key according to current index entry, but with sequence
    // number overwritten to global_seqno.
    IterKey first_internal_key;
    SequenceNumber global_seqno;

    explicit GlobalSeqnoState(SequenceNumber seqno) : global_seqno(seqno) {}
  };

  std::unique_ptr<GlobalSeqnoState> global_seqno_state_;
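
  // Illustrative example (assumed values, not from this header): with
  // global_seqno_state_->global_seqno == 100, an index entry whose stored
  // first_internal_key carries sequence number 0 is exposed through value()
  // with sequence number 100 instead; the rewritten key bytes live in
  // first_internal_key above, which is why IsValuePinned() returns false in
  // that case.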

  // Buffers the `first_internal_key` referred to by `decoded_value_` when
  // `pad_min_timestamp_` is true.
  std::string first_internal_key_with_ts_;

  // Set *prefix_may_exist to false if no key possibly shares the same prefix
  // as `target`. If not set, the resulting position should be the same as a
  // total order Seek.
  bool PrefixSeek(const Slice& target, uint32_t* index, bool* prefix_may_exist);
  // Set *prefix_may_exist to false if no key can possibly share the same
  // prefix as `target`. If not set, the resulting position should be the same
  // as a total order seek.
  bool BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
                            uint32_t left, uint32_t right, uint32_t* index,
                            bool* prefix_may_exist);
  inline int CompareBlockKey(uint32_t block_index, const Slice& target);

  inline bool ParseNextIndexKey();

  // When value_delta_encoded_ is enabled, this decodes the value, which is
  // assumed to be a BlockHandle, and puts it into decoded_value_.
  inline void DecodeCurrentValue(bool is_shared);
};

}  // namespace ROCKSDB_NAMESPACE