// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/format.h"

#include <cinttypes>
#include <string>

#include "block_fetcher.h"
#include "file/random_access_file_reader.h"
#include "memory/memory_allocator.h"
#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "options/options_helper.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "table/block_based/block.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/persistent_cache_helper.h"
#include "util/cast_util.h"
#include "util/coding.h"
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/hash.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace ROCKSDB_NAMESPACE {

extern const uint64_t kLegacyBlockBasedTableMagicNumber;
extern const uint64_t kBlockBasedTableMagicNumber;

#ifndef ROCKSDB_LITE
extern const uint64_t kLegacyPlainTableMagicNumber;
extern const uint64_t kPlainTableMagicNumber;
#else
// ROCKSDB_LITE doesn't have plain table
const uint64_t kLegacyPlainTableMagicNumber = 0;
const uint64_t kPlainTableMagicNumber = 0;
#endif

const char* kHostnameForDbHostId = "__hostname__";

bool ShouldReportDetailedTime(Env* env, Statistics* stats) {
  return env != nullptr && stats != nullptr &&
         stats->get_stats_level() > kExceptDetailedTimers;
}

void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~uint64_t{0});
  assert(size_ != ~uint64_t{0});
  PutVarint64Varint64(dst, offset_, size_);
}

char* BlockHandle::EncodeTo(char* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~uint64_t{0});
  assert(size_ != ~uint64_t{0});
  char* cur = EncodeVarint64(dst, offset_);
  cur = EncodeVarint64(cur, size_);
  return cur;
}
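
// Example (for illustration): a handle with offset 0 and size 100 encodes as
// the two varint64 bytes 0x00 0x64. Each varint64 can take up to 10 bytes, so
// an encoded handle never exceeds 2 * 10 = BlockHandle::kMaxEncodedLength
// bytes.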

Status BlockHandle::DecodeFrom(Slice* input) {
  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
    return Status::OK();
  } else {
    // reset in case of failure after partial decoding
    offset_ = 0;
    size_ = 0;
    return Status::Corruption("bad block handle");
  }
}

Status BlockHandle::DecodeSizeFrom(uint64_t _offset, Slice* input) {
  if (GetVarint64(input, &size_)) {
    offset_ = _offset;
    return Status::OK();
  } else {
    // reset in case of failure after partial decoding
    offset_ = 0;
    size_ = 0;
    return Status::Corruption("bad block handle");
  }
}

// Return a string that contains a copy of the handle.
std::string BlockHandle::ToString(bool hex) const {
  std::string handle_str;
  EncodeTo(&handle_str);
  if (hex) {
    return Slice(handle_str).ToString(true);
  } else {
    return handle_str;
  }
}

const BlockHandle BlockHandle::kNullBlockHandle(0, 0);

void IndexValue::EncodeTo(std::string* dst, bool have_first_key,
                          const BlockHandle* previous_handle) const {
  if (previous_handle) {
    // WART: this is specific to Block-based table
    assert(handle.offset() == previous_handle->offset() +
                                  previous_handle->size() +
                                  BlockBasedTable::kBlockTrailerSize);
    PutVarsignedint64(dst, handle.size() - previous_handle->size());
  } else {
    handle.EncodeTo(dst);
  }
  assert(dst->size() != 0);

  if (have_first_key) {
    PutLengthPrefixedSlice(dst, first_internal_key);
  }
}
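
// Note on the delta encoding above: when the previous handle is known, only
// the signed difference between this block's size and the previous block's
// size is written; the offset is implied, because consecutive blocks are laid
// out back to back separated by a fixed-size block trailer. For example, if
// the previous block has size 100 and this block has size 90, the encoded
// value is the signed varint for -10.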

Status IndexValue::DecodeFrom(Slice* input, bool have_first_key,
                              const BlockHandle* previous_handle) {
  if (previous_handle) {
    int64_t delta;
    if (!GetVarsignedint64(input, &delta)) {
      return Status::Corruption("bad delta-encoded index value");
    }
    // WART: this is specific to Block-based table
    handle = BlockHandle(previous_handle->offset() + previous_handle->size() +
                             BlockBasedTable::kBlockTrailerSize,
                         previous_handle->size() + delta);
  } else {
    Status s = handle.DecodeFrom(input);
    if (!s.ok()) {
      return s;
    }
  }

  if (!have_first_key) {
    first_internal_key = Slice();
  } else if (!GetLengthPrefixedSlice(input, &first_internal_key)) {
    return Status::Corruption("bad first key in block info");
  }

  return Status::OK();
}

std::string IndexValue::ToString(bool hex, bool have_first_key) const {
  std::string s;
  EncodeTo(&s, have_first_key, nullptr);
  if (hex) {
    return Slice(s).ToString(true);
  } else {
    return s;
  }
}
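
// Note: ToString always encodes without a previous handle (no delta), so the
// printed representation is self-contained.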

namespace {
inline bool IsLegacyFooterFormat(uint64_t magic_number) {
  return magic_number == kLegacyBlockBasedTableMagicNumber ||
         magic_number == kLegacyPlainTableMagicNumber;
}

inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
  if (magic_number == kLegacyBlockBasedTableMagicNumber) {
    return kBlockBasedTableMagicNumber;
  }
  if (magic_number == kLegacyPlainTableMagicNumber) {
    return kPlainTableMagicNumber;
  }
  assert(false);
  return magic_number;
}

inline uint64_t DownconvertToLegacyFooterFormat(uint64_t magic_number) {
  if (magic_number == kBlockBasedTableMagicNumber) {
    return kLegacyBlockBasedTableMagicNumber;
  }
  if (magic_number == kPlainTableMagicNumber) {
    return kLegacyPlainTableMagicNumber;
  }
  assert(false);
  return magic_number;
}

inline uint8_t BlockTrailerSizeForMagicNumber(uint64_t magic_number) {
  if (magic_number == kBlockBasedTableMagicNumber ||
      magic_number == kLegacyBlockBasedTableMagicNumber) {
    return static_cast<uint8_t>(BlockBasedTable::kBlockTrailerSize);
  } else {
    return 0;
  }
}
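
// Only block-based tables append a trailer (compression type byte plus a
// 32-bit checksum) to each block; other formats, such as plain table, have no
// per-block trailer, hence the 0 above.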

// Footer format, in three parts:
// * Part1
//   -> format_version == 0 (inferred from legacy magic number)
//      <empty> (0 bytes)
//   -> format_version >= 1
//      checksum type (char, 1 byte)
// * Part2
//      metaindex handle (varint64 offset, varint64 size)
//      index handle     (varint64 offset, varint64 size)
//      <zero padding> for part2 size = 2 * BlockHandle::kMaxEncodedLength = 40
// * Part3
//   -> format_version == 0 (inferred from legacy magic number)
//      legacy magic number (8 bytes)
//   -> format_version >= 1 (inferred from NOT legacy magic number)
//      format_version (uint32LE, 4 bytes), also called "footer version"
//      newer magic number (8 bytes)
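//
// With the layout above, Part2 is always 40 bytes, so a format_version == 0
// footer is 40 + 8 = 48 bytes (Footer::kVersion0EncodedLength) and a
// format_version >= 1 footer is 1 + 40 + 4 + 8 = 53 bytes
// (Footer::kNewVersionsEncodedLength).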

constexpr size_t kFooterPart2Size = 2 * BlockHandle::kMaxEncodedLength;
}  // namespace

void FooterBuilder::Build(uint64_t magic_number, uint32_t format_version,
                          uint64_t footer_offset, ChecksumType checksum_type,
                          const BlockHandle& metaindex_handle,
                          const BlockHandle& index_handle) {
  (void)footer_offset;  // Future use

  assert(magic_number != Footer::kNullTableMagicNumber);
  assert(IsSupportedFormatVersion(format_version));

  char* part2;
  char* part3;
  if (format_version > 0) {
    slice_ = Slice(data_.data(), Footer::kNewVersionsEncodedLength);
    // Generate parts 1 and 3
    char* cur = data_.data();
    // Part 1
    *(cur++) = checksum_type;
    // Part 2
    part2 = cur;
    // Skip over part 2 for now
    cur += kFooterPart2Size;
    // Part 3
    part3 = cur;
    EncodeFixed32(cur, format_version);
    cur += 4;
    EncodeFixed64(cur, magic_number);
    assert(cur + 8 == slice_.data() + slice_.size());
  } else {
    slice_ = Slice(data_.data(), Footer::kVersion0EncodedLength);
    // Legacy SST files use kCRC32c checksum but it's not stored in footer.
    assert(checksum_type == kNoChecksum || checksum_type == kCRC32c);
    // Generate part 3 (part 1 empty, skip part 2 for now)
    part2 = data_.data();
    part3 = part2 + kFooterPart2Size;
    char* cur = part3;
    // Use legacy magic numbers to indicate format_version=0, for
    // compatibility. No other cases should use format_version=0.
    EncodeFixed64(cur, DownconvertToLegacyFooterFormat(magic_number));
    assert(cur + 8 == slice_.data() + slice_.size());
  }

  {
    char* cur = part2;
    cur = metaindex_handle.EncodeTo(cur);
    cur = index_handle.EncodeTo(cur);
    // Zero pad remainder
    std::fill(cur, part3, char{0});
  }
}
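
// Usage sketch (illustrative only): a table builder constructs a
// FooterBuilder, calls Build() with the table's magic number, format version,
// checksum type, and the metaindex/index handles, and then writes the
// resulting footer bytes (slice_) as the very last bytes of the SST file.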

Status Footer::DecodeFrom(Slice input, uint64_t input_offset) {
  (void)input_offset;  // Future use

  // Only decode to unused Footer
  assert(table_magic_number_ == kNullTableMagicNumber);
  assert(input != nullptr);
  assert(input.size() >= kMinEncodedLength);

  const char* magic_ptr = input.data() + input.size() - kMagicNumberLengthByte;
  uint64_t magic = DecodeFixed64(magic_ptr);

  // We check for legacy formats here and silently upconvert them
  bool legacy = IsLegacyFooterFormat(magic);
  if (legacy) {
    magic = UpconvertLegacyFooterFormat(magic);
  }
  table_magic_number_ = magic;
  block_trailer_size_ = BlockTrailerSizeForMagicNumber(magic);

  // Parse Part3
  if (legacy) {
    // The size is already asserted to be at least kMinEncodedLength
    // at the beginning of the function
    input.remove_prefix(input.size() - kVersion0EncodedLength);
    format_version_ = 0 /* legacy */;
    checksum_type_ = kCRC32c;
  } else {
    const char* part3_ptr = magic_ptr - 4;
    format_version_ = DecodeFixed32(part3_ptr);
    if (!IsSupportedFormatVersion(format_version_)) {
      return Status::Corruption("Corrupt or unsupported format_version: " +
                                std::to_string(format_version_));
    }
    // All known format versions >= 1 occupy exactly this many bytes.
    if (input.size() < kNewVersionsEncodedLength) {
      return Status::Corruption("Input is too short to be an SST file");
    }
    uint64_t adjustment = input.size() - kNewVersionsEncodedLength;
    input.remove_prefix(adjustment);

    // Parse Part1
    char chksum = input.data()[0];
    checksum_type_ = lossless_cast<ChecksumType>(chksum);
    if (!IsSupportedChecksumType(checksum_type())) {
      return Status::Corruption("Corrupt or unsupported checksum type: " +
                                std::to_string(lossless_cast<uint8_t>(chksum)));
    }
    // Consume checksum type field
    input.remove_prefix(1);
  }

  // Parse Part2
  Status result = metaindex_handle_.DecodeFrom(&input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(&input);
  }
  return result;
  // Padding in part2 is ignored
}
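
// Note: decoding works backwards from the end of the footer slice: the
// trailing magic number identifies the table format (and whether this is a
// legacy footer), which in turn determines whether a format_version and
// checksum type byte precede it.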

std::string Footer::ToString() const {
  std::string result;
  result.reserve(1024);

  bool legacy = IsLegacyFooterFormat(table_magic_number_);
  if (legacy) {
    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n ");
    result.append("index handle: " + index_handle_.ToString() + "\n ");
    result.append("table_magic_number: " + std::to_string(table_magic_number_) +
                  "\n ");
  } else {
    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n ");
    result.append("index handle: " + index_handle_.ToString() + "\n ");
    result.append("table_magic_number: " + std::to_string(table_magic_number_) +
                  "\n ");
    result.append("format version: " + std::to_string(format_version_) +
                  "\n ");
  }
  return result;
}
|
|
|
|
|
2020-06-29 21:51:57 +00:00
|
|
|
Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file,
|
2017-08-11 18:59:13 +00:00
|
|
|
FilePrefetchBuffer* prefetch_buffer,
|
|
|
|
uint64_t file_size, Footer* footer,
|
|
|
|
uint64_t enforce_table_magic_number) {
|
2014-05-01 18:09:32 +00:00
|
|
|
if (file_size < Footer::kMinEncodedLength) {
|
2022-05-06 20:03:58 +00:00
|
|
|
return Status::Corruption("file is too short (" +
|
|
|
|
std::to_string(file_size) +
|
2019-03-27 23:13:08 +00:00
|
|
|
" bytes) to be an "
|
|
|
|
"sstable: " +
|
|
|
|
file->file_name());
|
2013-12-05 00:35:48 +00:00
|
|
|
}
|
|
|
|
|
2020-03-06 22:02:09 +00:00
|
|
|
std::string footer_buf;
|
2020-04-24 22:30:12 +00:00
|
|
|
AlignedBuf internal_buf;
|
2013-12-05 00:35:48 +00:00
|
|
|
Slice footer_input;
|
2021-12-10 16:12:09 +00:00
|
|
|
uint64_t read_offset = (file_size > Footer::kMaxEncodedLength)
|
|
|
|
? file_size - Footer::kMaxEncodedLength
|
|
|
|
: 0;
|
2017-08-11 18:59:13 +00:00
|
|
|
Status s;
|
2020-06-29 21:51:57 +00:00
|
|
|
// TODO: Need to pass appropriate deadline to TryReadFromCache(). Right now,
|
|
|
|
// there is no readahead for point lookups, so TryReadFromCache will fail if
|
|
|
|
// the required data is not in the prefetch buffer. Once deadline is enabled
|
|
|
|
// for iterator, TryReadFromCache might do a readahead. Revisit to see if we
|
|
|
|
// need to pass a timeout at that point
|
2022-02-17 07:17:03 +00:00
|
|
|
// TODO: rate limit footer reads.
|
2017-08-11 18:59:13 +00:00
|
|
|
if (prefetch_buffer == nullptr ||
|
2022-02-17 07:17:03 +00:00
|
|
|
!prefetch_buffer->TryReadFromCache(
|
Set Read rate limiter priority dynamically and pass it to FS (#9996)
Summary:
### Context:
Background compactions and flush generate large reads and writes, and can be long running, especially for universal compaction. In some cases, this can impact foreground reads and writes by users.
### Solution
User, Flush, and Compaction reads share some code path. For this task, we update the rate_limiter_priority in ReadOptions for code paths (e.g. FindTable (mainly in BlockBasedTable::Open()) and various iterators), and eventually update the rate_limiter_priority in IOOptions for FSRandomAccessFile.
**This PR is for the Read path.** The **Read:** dynamic priority for different state are listed as follows:
| State | Normal | Delayed | Stalled |
| ----- | ------ | ------- | ------- |
| Flush (verification read in BuildTable()) | IO_USER | IO_USER | IO_USER |
| Compaction | IO_LOW | IO_USER | IO_USER |
| User | User provided | User provided | User provided |
We will respect the read_options that the user provided and will not set it.
The only sst read for Flush is the verification read in BuildTable(). It claims to be "regard as user read".
**Details**
1. Set read_options.rate_limiter_priority dynamically:
- User: Do not update the read_options. Use the read_options that the user provided.
- Compaction: Update read_options in CompactionJob::ProcessKeyValueCompaction().
- Flush: Update read_options in BuildTable().
2. Pass the rate limiter priority to FSRandomAccessFile functions:
- After calling the FindTable(), read_options is passed through GetTableReader(table_cache.cc), BlockBasedTableFactory::NewTableReader(block_based_table_factory.cc), and BlockBasedTable::Open(). The Open() needs some updates for the ReadOptions variable and the updates are also needed for the called functions, including PrefetchTail(), PrepareIOOptions(), ReadFooterFromFile(), ReadMetaIndexblock(), ReadPropertiesBlock(), PrefetchIndexAndFilterBlocks(), and ReadRangeDelBlock().
- In RandomAccessFileReader, the functions to be updated include Read(), MultiRead(), ReadAsync(), and Prefetch().
- Update the downstream functions of NewIndexIterator(), NewDataBlockIterator(), and BlockBasedTableIterator().
### Test Plans
Add unit tests.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9996
Reviewed By: anand1976
Differential Revision: D36452483
Pulled By: gitbw95
fbshipit-source-id: 60978204a4f849bb9261cb78d9bc1cb56d6008cf
2022-05-19 02:41:44 +00:00
|
|
|
opts, file, read_offset, Footer::kMaxEncodedLength, &footer_input,
|
|
|
|
nullptr, opts.rate_limiter_priority)) {
|
2020-03-06 22:02:09 +00:00
|
|
|
if (file->use_direct_io()) {
|
2020-06-29 21:51:57 +00:00
|
|
|
s = file->Read(opts, read_offset, Footer::kMaxEncodedLength,
|
2022-02-17 07:17:03 +00:00
|
|
|
&footer_input, nullptr, &internal_buf,
|
Set Read rate limiter priority dynamically and pass it to FS (#9996)
Summary:
### Context:
Background compactions and flush generate large reads and writes, and can be long running, especially for universal compaction. In some cases, this can impact foreground reads and writes by users.
### Solution
User, Flush, and Compaction reads share some code path. For this task, we update the rate_limiter_priority in ReadOptions for code paths (e.g. FindTable (mainly in BlockBasedTable::Open()) and various iterators), and eventually update the rate_limiter_priority in IOOptions for FSRandomAccessFile.
**This PR is for the Read path.** The **Read:** dynamic priority for different state are listed as follows:
| State | Normal | Delayed | Stalled |
| ----- | ------ | ------- | ------- |
| Flush (verification read in BuildTable()) | IO_USER | IO_USER | IO_USER |
| Compaction | IO_LOW | IO_USER | IO_USER |
| User | User provided | User provided | User provided |
We will respect the read_options that the user provided and will not set it.
The only sst read for Flush is the verification read in BuildTable(). It claims to be "regard as user read".
**Details**
1. Set read_options.rate_limiter_priority dynamically:
- User: Do not update the read_options. Use the read_options that the user provided.
- Compaction: Update read_options in CompactionJob::ProcessKeyValueCompaction().
- Flush: Update read_options in BuildTable().
2. Pass the rate limiter priority to FSRandomAccessFile functions:
- After calling the FindTable(), read_options is passed through GetTableReader(table_cache.cc), BlockBasedTableFactory::NewTableReader(block_based_table_factory.cc), and BlockBasedTable::Open(). The Open() needs some updates for the ReadOptions variable and the updates are also needed for the called functions, including PrefetchTail(), PrepareIOOptions(), ReadFooterFromFile(), ReadMetaIndexblock(), ReadPropertiesBlock(), PrefetchIndexAndFilterBlocks(), and ReadRangeDelBlock().
- In RandomAccessFileReader, the functions to be updated include Read(), MultiRead(), ReadAsync(), and Prefetch().
- Update the downstream functions of NewIndexIterator(), NewDataBlockIterator(), and BlockBasedTableIterator().
### Test Plans
Add unit tests.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9996
Reviewed By: anand1976
Differential Revision: D36452483
Pulled By: gitbw95
fbshipit-source-id: 60978204a4f849bb9261cb78d9bc1cb56d6008cf
2022-05-19 02:41:44 +00:00
|
|
|
opts.rate_limiter_priority);
|
2020-03-06 22:02:09 +00:00
|
|
|
} else {
|
|
|
|
footer_buf.reserve(Footer::kMaxEncodedLength);
|
2020-06-29 21:51:57 +00:00
|
|
|
s = file->Read(opts, read_offset, Footer::kMaxEncodedLength,
|
2022-02-17 07:17:03 +00:00
|
|
|
&footer_input, &footer_buf[0], nullptr,
|
Set Read rate limiter priority dynamically and pass it to FS (#9996)
Summary:
### Context:
Background compactions and flush generate large reads and writes, and can be long running, especially for universal compaction. In some cases, this can impact foreground reads and writes by users.
### Solution
User, Flush, and Compaction reads share some code path. For this task, we update the rate_limiter_priority in ReadOptions for code paths (e.g. FindTable (mainly in BlockBasedTable::Open()) and various iterators), and eventually update the rate_limiter_priority in IOOptions for FSRandomAccessFile.
**This PR is for the Read path.** The **Read:** dynamic priority for different state are listed as follows:
| State | Normal | Delayed | Stalled |
| ----- | ------ | ------- | ------- |
| Flush (verification read in BuildTable()) | IO_USER | IO_USER | IO_USER |
| Compaction | IO_LOW | IO_USER | IO_USER |
| User | User provided | User provided | User provided |
We will respect the read_options that the user provided and will not set it.
The only sst read for Flush is the verification read in BuildTable(). It claims to be "regard as user read".
**Details**
1. Set read_options.rate_limiter_priority dynamically:
- User: Do not update the read_options. Use the read_options that the user provided.
- Compaction: Update read_options in CompactionJob::ProcessKeyValueCompaction().
- Flush: Update read_options in BuildTable().
2. Pass the rate limiter priority to FSRandomAccessFile functions:
- After calling the FindTable(), read_options is passed through GetTableReader(table_cache.cc), BlockBasedTableFactory::NewTableReader(block_based_table_factory.cc), and BlockBasedTable::Open(). The Open() needs some updates for the ReadOptions variable and the updates are also needed for the called functions, including PrefetchTail(), PrepareIOOptions(), ReadFooterFromFile(), ReadMetaIndexblock(), ReadPropertiesBlock(), PrefetchIndexAndFilterBlocks(), and ReadRangeDelBlock().
- In RandomAccessFileReader, the functions to be updated include Read(), MultiRead(), ReadAsync(), and Prefetch().
- Update the downstream functions of NewIndexIterator(), NewDataBlockIterator(), and BlockBasedTableIterator().
### Test Plans
Add unit tests.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9996
Reviewed By: anand1976
Differential Revision: D36452483
Pulled By: gitbw95
fbshipit-source-id: 60978204a4f849bb9261cb78d9bc1cb56d6008cf
2022-05-19 02:41:44 +00:00
|
|
|
opts.rate_limiter_priority);
|
2020-03-06 22:02:09 +00:00
|
|
|
}
|
2017-08-11 18:59:13 +00:00
|
|
|
if (!s.ok()) return s;
|
|
|
|
}
|
2013-12-05 00:35:48 +00:00
|
|
|
|
|
|
|
// Check that we actually read the whole footer from the file. It may be
|
|
|
|
// that size isn't correct.
|
2014-05-01 18:09:32 +00:00
|
|
|
if (footer_input.size() < Footer::kMinEncodedLength) {
|
    // FIXME: this error message is bad. We should be checking whether the
    // provided file_size matches what's on disk, at least in this case.
    // Unfortunately FileSystem/Env does not provide a way to get the size
    // of an open file, so getting file size requires a full path seek.
    return Status::Corruption("file is too short (" +
                              std::to_string(file_size) +
                              " bytes) to be an "
                              "sstable" +
                              file->file_name());
  }

  s = footer->DecodeFrom(footer_input, read_offset);
  if (!s.ok()) {
    return s;
  }
  if (enforce_table_magic_number != 0 &&
      enforce_table_magic_number != footer->table_magic_number()) {
    return Status::Corruption("Bad table magic number: expected " +
                              std::to_string(enforce_table_magic_number) +
                              ", found " +
                              std::to_string(footer->table_magic_number()) +
                              " in " + file->file_name());
  }
  return Status::OK();
}
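
// Usage sketch (hedged): a caller-side view of the magic-number enforcement
// above, assuming the ReadFooterFromFile() signature declared in
// table/format.h. Passing 0 for enforce_table_magic_number disables the
// check; passing e.g. kBlockBasedTableMagicNumber makes a mismatch surface
// as the Corruption status constructed above.
//
//   Footer footer;
//   Status s = ReadFooterFromFile(opts, file, prefetch_buffer, file_size,
//                                 &footer, kBlockBasedTableMagicNumber);
//   if (!s.ok()) {
//     // Either the file was too short or the footer's magic number did not
//     // match the expected table format.
//   }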

namespace {
// Custom handling for the last byte of a block, to avoid invoking streaming
// API to get an effective block checksum. This function is its own inverse
// because it uses xor.
inline uint32_t ModifyChecksumForLastByte(uint32_t checksum, char last_byte) {
  // This strategy bears some resemblance to extending a CRC checksum by one
  // more byte, except we don't need to re-mix the input checksum as long as
  // we do this step only once (per checksum).
  const uint32_t kRandomPrime = 0x6b9083d9;
  return checksum ^ lossless_cast<uint8_t>(last_byte) * kRandomPrime;
}
}  // namespace
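
// Illustration of the self-inverse property noted above: the adjustment is a
// single xor with a value derived only from last_byte, so applying it twice
// with the same byte restores the original checksum. (Variable names here are
// hypothetical.)
//
//   uint32_t base = Lower32of64(XXH3_64bits(data, data_size));
//   uint32_t once = ModifyChecksumForLastByte(base, last_byte);
//   uint32_t twice = ModifyChecksumForLastByte(once, last_byte);
//   assert(twice == base);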

uint32_t ComputeBuiltinChecksum(ChecksumType type, const char* data,
                                size_t data_size) {
  switch (type) {
    case kCRC32c:
      return crc32c::Mask(crc32c::Value(data, data_size));
    case kxxHash:
      return XXH32(data, data_size, /*seed*/ 0);
    case kxxHash64:
      return Lower32of64(XXH64(data, data_size, /*seed*/ 0));
    case kXXH3: {
      if (data_size == 0) {
        // Special case because of special handling for last byte, not
        // present in this case. Can be any value different from other
        // small input size checksums.
        return 0;
      } else {
        // See corresponding code in ComputeBuiltinChecksumWithLastByte
        uint32_t v = Lower32of64(XXH3_64bits(data, data_size - 1));
        return ModifyChecksumForLastByte(v, data[data_size - 1]);
      }
    }
    default:  // including kNoChecksum
      return 0;
  }
}

uint32_t ComputeBuiltinChecksumWithLastByte(ChecksumType type, const char* data,
                                            size_t data_size, char last_byte) {
  switch (type) {
    case kCRC32c: {
      uint32_t crc = crc32c::Value(data, data_size);
      // Extend to cover last byte (compression type)
      crc = crc32c::Extend(crc, &last_byte, 1);
      return crc32c::Mask(crc);
    }
    case kxxHash: {
      XXH32_state_t* const state = XXH32_createState();
      XXH32_reset(state, 0);
      XXH32_update(state, data, data_size);
      // Extend to cover last byte (compression type)
      XXH32_update(state, &last_byte, 1);
      uint32_t v = XXH32_digest(state);
      XXH32_freeState(state);
      return v;
    }
    case kxxHash64: {
      XXH64_state_t* const state = XXH64_createState();
      XXH64_reset(state, 0);
      XXH64_update(state, data, data_size);
      // Extend to cover last byte (compression type)
      XXH64_update(state, &last_byte, 1);
      uint32_t v = Lower32of64(XXH64_digest(state));
      XXH64_freeState(state);
      return v;
    }
    case kXXH3: {
      // XXH3 is a complicated hash function that is extremely fast on
      // contiguous input, but that makes its streaming support rather
      // complex. It is worth custom handling of the last byte (`type`)
      // in order to avoid allocating a large state object and bringing
      // that code complexity into CPU working set.
      uint32_t v = Lower32of64(XXH3_64bits(data, data_size));
      return ModifyChecksumForLastByte(v, last_byte);
    }
    default:  // including kNoChecksum
      return 0;
  }
}
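
// Sketch of the intended relationship between the two helpers above, using
// hypothetical names: for a block whose payload of n bytes is immediately
// followed by its compression-type byte, hashing payload plus type byte in
// one shot should agree with hashing the payload and folding in the type
// byte separately.
//
//   // `block` holds n payload bytes followed by the type byte at block[n].
//   uint32_t a = ComputeBuiltinChecksum(type, block, n + 1);
//   uint32_t b = ComputeBuiltinChecksumWithLastByte(type, block, n, block[n]);
//   assert(a == b);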

Status UncompressBlockContentsForCompressionType(
    const UncompressionInfo& uncompression_info, const char* data, size_t n,
    BlockContents* contents, uint32_t format_version,
    const ImmutableOptions& ioptions, MemoryAllocator* allocator) {
  Status ret = Status::OK();

  assert(uncompression_info.type() != kNoCompression &&
         "Invalid compression type");

  StopWatchNano timer(ioptions.clock,
                      ShouldReportDetailedTime(ioptions.env, ioptions.stats));
  size_t uncompressed_size = 0;
  CacheAllocationPtr ubuf =
      UncompressData(uncompression_info, data, n, &uncompressed_size,
                     GetCompressFormatForVersion(format_version), allocator);
  if (!ubuf) {
    if (!CompressionTypeSupported(uncompression_info.type())) {
      return Status::NotSupported(
          "Unsupported compression method for this build",
          CompressionTypeToString(uncompression_info.type()));
    } else {
      return Status::Corruption(
          "Corrupted compressed block contents",
          CompressionTypeToString(uncompression_info.type()));
    }
  }

  *contents = BlockContents(std::move(ubuf), uncompressed_size);

  if (ShouldReportDetailedTime(ioptions.env, ioptions.stats)) {
    RecordTimeToHistogram(ioptions.stats, DECOMPRESSION_TIMES_NANOS,
                          timer.ElapsedNanos());
  }
  RecordTimeToHistogram(ioptions.stats, BYTES_DECOMPRESSED,
                        contents->data.size());
  RecordTick(ioptions.stats, NUMBER_BLOCK_DECOMPRESSED);

  TEST_SYNC_POINT_CALLBACK(
      "UncompressBlockContentsForCompressionType:TamperWithReturnValue",
      static_cast<void*>(&ret));
  TEST_SYNC_POINT_CALLBACK(
      "UncompressBlockContentsForCompressionType:"
      "TamperWithDecompressionOutput",
      static_cast<void*>(contents));

  return ret;
}

//
// The 'data' points to the raw block contents that was read in from file.
// This method allocates a new heap buffer and the raw block
// contents are uncompressed into this buffer. This
// buffer is returned via 'contents' and it is up to the caller to
// free this buffer.
// format_version is the block format as defined in include/rocksdb/table.h
Status UncompressBlockContents(const UncompressionInfo& uncompression_info,
                               const char* data, size_t n,
                               BlockContents* contents, uint32_t format_version,
                               const ImmutableOptions& ioptions,
                               MemoryAllocator* allocator) {
  assert(data[n] != kNoCompression);
  assert(data[n] == static_cast<char>(uncompression_info.type()));
  return UncompressBlockContentsForCompressionType(uncompression_info, data, n,
                                                   contents, format_version,
                                                   ioptions, allocator);
}
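
// Usage sketch (hedged; the UncompressionContext/UncompressionInfo
// construction below is assumed from util/compression.h, not from this file):
// the caller passes the payload length n, with the compression-type byte
// stored contiguously at data[n], matching the asserts above.
//
//   // `raw` points to n payload bytes followed by one compression-type byte.
//   CompressionType ctype = static_cast<CompressionType>(raw[n]);
//   UncompressionContext ctx(ctype);
//   UncompressionInfo info(ctx, UncompressionDict::GetEmptyDict(), ctype);
//   BlockContents out;
//   Status s = UncompressBlockContents(info, raw, n, &out, format_version,
//                                      ioptions, /*allocator=*/nullptr);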

// Replace the contents of db_host_id with the actual hostname, if db_host_id
// matches the keyword kHostnameForDbHostId
Status ReifyDbHostIdProperty(Env* env, std::string* db_host_id) {
  assert(db_host_id);
  if (*db_host_id == kHostnameForDbHostId) {
    Status s = env->GetHostNameString(db_host_id);
    if (!s.ok()) {
      db_host_id->clear();
    }
    return s;
  }

  return Status::OK();
}
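
// Usage sketch: only the exact kHostnameForDbHostId keyword triggers the
// rewrite; any other value passes through unchanged.
//
//   std::string host_id = kHostnameForDbHostId;
//   Status s = ReifyDbHostIdProperty(Env::Default(), &host_id);
//   // On success, host_id now holds the actual hostname of this machine.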

}  // namespace ROCKSDB_NAMESPACE