2017-12-11 23:16:37 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "table/block_fetcher.h"
|
|
|
|
|
2022-05-23 19:15:26 +00:00
|
|
|
#include <cassert>
|
2019-06-06 20:52:39 +00:00
|
|
|
#include <cinttypes>
|
2019-03-27 23:13:08 +00:00
|
|
|
#include <string>
|
2017-12-11 23:16:37 +00:00
|
|
|
|
2019-06-01 00:19:43 +00:00
|
|
|
#include "logging/logging.h"
|
2023-05-17 18:27:09 +00:00
|
|
|
#include "memory/memory_allocator_impl.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "monitoring/perf_context_imp.h"
|
Improve / clean up meta block code & integrity (#9163)
Summary:
* Checksums are now checked on meta blocks unless specifically
suppressed or not applicable (e.g. plain table). (Was other way around.)
This means a number of cases that were not checking checksums now are,
including direct read TableProperties in Version::GetTableProperties
(fixed in meta_blocks ReadTableProperties), reading any block from
PersistentCache (fixed in BlockFetcher), read TableProperties in
SstFileDumper (ldb/sst_dump/BackupEngine) before table reader open,
maybe more.
* For that to work, I moved the global_seqno+TableProperties checksum
logic to the shared table/ code, because that is used by many utilities
such as SstFileDumper.
* Also for that to work, we have to know when we're dealing with a block
that has a checksum (trailer), so added that capability to Footer based
on magic number, and from there BlockFetcher.
* Knowledge of trailer presence has also fixed a problem where other
table formats were reading blocks including bytes for a non-existent
trailer--and awkwardly kind-of not using them, e.g. no shared code
checking checksums. (BlockFetcher compression type was populated
incorrectly.) Now we only read what is needed.
* Minimized code duplication and differing/incompatible/awkward
abstractions in meta_blocks.{cc,h} (e.g. SeekTo in metaindex block
without parsing block handle)
* Moved some meta block handling code from table_properties*.*
* Moved some code specific to block-based table from shared table/ code
to BlockBasedTable class. The checksum stuff means we can't completely
separate it, but things that don't need to be in shared table/ code
should not be.
* Use unique_ptr rather than raw ptr in more places. (Note: you can
std::move from unique_ptr to shared_ptr.)
Without enhancements to GetPropertiesOfAllTablesTest (see below),
net reduction of roughly 100 lines of code.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9163
Test Plan:
existing tests and
* Enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to verify that
checksums are now checked on direct read of table properties by TableCache
(new test would fail before this change)
* Also enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to test
putting table properties under old meta name
* Also generally enhanced that same test to actually test what it was
supposed to be testing already, by kicking things out of table cache when
we don't want them there.
Reviewed By: ajkr, mrambacher
Differential Revision: D32514757
Pulled By: pdillinger
fbshipit-source-id: 507964b9311d186ae8d1131182290cbd97a99fa9
2021-11-18 19:42:12 +00:00
|
|
|
#include "rocksdb/compression_type.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "rocksdb/env.h"
|
2019-05-30 21:47:29 +00:00
|
|
|
#include "table/block_based/block.h"
|
|
|
|
#include "table/block_based/block_based_table_reader.h"
|
Improve / clean up meta block code & integrity (#9163)
Summary:
* Checksums are now checked on meta blocks unless specifically
suppressed or not applicable (e.g. plain table). (Was other way around.)
This means a number of cases that were not checking checksums now are,
including direct read TableProperties in Version::GetTableProperties
(fixed in meta_blocks ReadTableProperties), reading any block from
PersistentCache (fixed in BlockFetcher), read TableProperties in
SstFileDumper (ldb/sst_dump/BackupEngine) before table reader open,
maybe more.
* For that to work, I moved the global_seqno+TableProperties checksum
logic to the shared table/ code, because that is used by many utilities
such as SstFileDumper.
* Also for that to work, we have to know when we're dealing with a block
that has a checksum (trailer), so added that capability to Footer based
on magic number, and from there BlockFetcher.
* Knowledge of trailer presence has also fixed a problem where other
table formats were reading blocks including bytes for a non-existent
trailer--and awkwardly kind-of not using them, e.g. no shared code
checking checksums. (BlockFetcher compression type was populated
incorrectly.) Now we only read what is needed.
* Minimized code duplication and differing/incompatible/awkward
abstractions in meta_blocks.{cc,h} (e.g. SeekTo in metaindex block
without parsing block handle)
* Moved some meta block handling code from table_properties*.*
* Moved some code specific to block-based table from shared table/ code
to BlockBasedTable class. The checksum stuff means we can't completely
separate it, but things that don't need to be in shared table/ code
should not be.
* Use unique_ptr rather than raw ptr in more places. (Note: you can
std::move from unique_ptr to shared_ptr.)
Without enhancements to GetPropertiesOfAllTablesTest (see below),
net reduction of roughly 100 lines of code.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9163
Test Plan:
existing tests and
* Enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to verify that
checksums are now checked on direct read of table properties by TableCache
(new test would fail before this change)
* Also enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to test
putting table properties under old meta name
* Also generally enhanced that same test to actually test what it was
supposed to be testing already, by kicking things out of table cache when
we don't want them there.
Reviewed By: ajkr, mrambacher
Differential Revision: D32514757
Pulled By: pdillinger
fbshipit-source-id: 507964b9311d186ae8d1131182290cbd97a99fa9
2021-11-18 19:42:12 +00:00
|
|
|
#include "table/block_based/block_type.h"
|
2020-06-19 23:16:57 +00:00
|
|
|
#include "table/block_based/reader_common.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "table/format.h"
|
2018-10-03 00:21:54 +00:00
|
|
|
#include "table/persistent_cache_helper.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "util/compression.h"
|
|
|
|
#include "util/stop_watch.h"
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2017-12-11 23:16:37 +00:00
|
|
|
|
Improve / clean up meta block code & integrity (#9163)
Summary:
* Checksums are now checked on meta blocks unless specifically
suppressed or not applicable (e.g. plain table). (Was other way around.)
This means a number of cases that were not checking checksums now are,
including direct read TableProperties in Version::GetTableProperties
(fixed in meta_blocks ReadTableProperties), reading any block from
PersistentCache (fixed in BlockFetcher), read TableProperties in
SstFileDumper (ldb/sst_dump/BackupEngine) before table reader open,
maybe more.
* For that to work, I moved the global_seqno+TableProperties checksum
logic to the shared table/ code, because that is used by many utilies
such as SstFileDumper.
* Also for that to work, we have to know when we're dealing with a block
that has a checksum (trailer), so added that capability to Footer based
on magic number, and from there BlockFetcher.
* Knowledge of trailer presence has also fixed a problem where other
table formats were reading blocks including bytes for a non-existant
trailer--and awkwardly kind-of not using them, e.g. no shared code
checking checksums. (BlockFetcher compression type was populated
incorrectly.) Now we only read what is needed.
* Minimized code duplication and differing/incompatible/awkward
abstractions in meta_blocks.{cc,h} (e.g. SeekTo in metaindex block
without parsing block handle)
* Moved some meta block handling code from table_properties*.*
* Moved some code specific to block-based table from shared table/ code
to BlockBasedTable class. The checksum stuff means we can't completely
separate it, but things that don't need to be in shared table/ code
should not be.
* Use unique_ptr rather than raw ptr in more places. (Note: you can
std::move from unique_ptr to shared_ptr.)
Without enhancements to GetPropertiesOfAllTablesTest (see below),
net reduction of roughly 100 lines of code.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9163
Test Plan:
existing tests and
* Enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to verify that
checksums are now checked on direct read of table properties by TableCache
(new test would fail before this change)
* Also enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to test
putting table properties under old meta name
* Also generally enhanced that same test to actually test what it was
supposed to be testing already, by kicking things out of table cache when
we don't want them there.
Reviewed By: ajkr, mrambacher
Differential Revision: D32514757
Pulled By: pdillinger
fbshipit-source-id: 507964b9311d186ae8d1131182290cbd97a99fa9
2021-11-18 19:42:12 +00:00
|
|
|
inline void BlockFetcher::ProcessTrailerIfPresent() {
|
|
|
|
if (footer_.GetBlockTrailerSize() > 0) {
|
|
|
|
assert(footer_.GetBlockTrailerSize() == BlockBasedTable::kBlockTrailerSize);
|
|
|
|
if (read_options_.verify_checksums) {
|
format_version=6 and context-aware block checksums (#9058)
Summary:
## Context checksum
All RocksDB checksums currently use 32 bits of checking
power, which should be 1 in 4 billion false negative (FN) probability (failing to
detect corruption). This is true for random corruptions, and in some cases
small corruptions are guaranteed to be detected. But some possible
corruptions, such as in storage metadata rather than storage payload data,
would have a much higher FN rate. For example:
* Data larger than one SST block is replaced by data from elsewhere in
the same or another SST file. Especially with block_align=true, the
probability of exact block size match is probably around 1 in 100, making
the FN probability around that same. Without `block_align=true` the
probability of same block start location is probably around 1 in 10,000,
for FN probability around 1 in a million.
To solve this problem in new format_version=6, we add "context awareness"
to block checksum checks. The stored and expected checksum value is
modified based on the block's position in the file and which file it is in. The
modifications are cleverly chosen so that, for example
* blocks within about 4GB of each other are guaranteed to use different context
* blocks that are offset by exactly some multiple of 4GiB are guaranteed to use
different context
* files generated by the same process are guaranteed to use different context
for the same offsets, until wrap-around after 2^32 - 1 files
Thus, with format_version=6, if a valid SST block and checksum is misplaced,
its checksum FN probability should be essentially ideal, 1 in 4B.
## Footer checksum
This change also adds checksum protection to the SST footer (with
format_version=6), for the first time without relying on whole file checksum.
To prevent a corruption of the format_version in the footer (e.g. 6 -> 5) to
defeat the footer checksum, we change much of the footer data format
including an "extended magic number" in format_version 6 that would be
interpreted as empty index and metaindex block handles in older footer
versions. We also change the encoding of handles to free up space for
other new data in footer.
## More detail: making space in footer
In order to keep footer the same size in format_version=6 (avoid change to IO
patterns), we have to free up some space for new data. We do this two ways:
* Metaindex block handle is encoded down to 4 bytes (from 10) by assuming
it immediately precedes the footer, and by assuming it is < 4GB.
* Index block handle is moved into metaindex. (I don't know why it was
in footer to begin with.)
## Performance
In case of small performance penalty, I've made a "pay as you go" optimization
to compensate: replace `MutableCFOptions` in BlockBasedTableBuilder::Rep
with the only field used in that structure after construction: `prefix_extractor`.
This makes the PR an overall performance improvement (results below).
Nevertheless I'm seeing essentially no difference going from fv=5 to fv=6,
even including that improvement for both. That's based on extreme case table
write performance testing, many files with many blocks. This is relatively
checksum intensive (small blocks) and salt generation intensive (small files).
```
(for I in `seq 1 100`; do TEST_TMPDIR=/dev/shm/dbbench2 ./db_bench -benchmarks=fillseq -memtablerep=vector -disable_wal=1 -allow_concurrent_memtable_write=false -num=3000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -write_buffer_size=100000 -compression_type=none -block_size=1000; done) 2>&1 | grep micros/op | tee out
awk '{ tot += $5; n += 1; } END { print int(1.0 * tot / n) }' < out
```
Each value below is ops/s averaged over 100 runs, run simultaneously with competing
configuration for load fairness
Before -> after (both fv=5): 483530 -> 483673 (negligible)
Re-run 1: 480733 -> 485427 (1.0% faster)
Re-run 2: 483821 -> 484541 (0.1% faster)
Before (fv=5) -> after (fv=6): 482006 -> 485100 (0.6% faster)
Re-run 1: 482212 -> 485075 (0.6% faster)
Re-run 2: 483590 -> 484073 (0.1% faster)
After fv=5 -> after fv=6: 483878 -> 485542 (0.3% faster)
Re-run 1: 485331 -> 483385 (0.4% slower)
Re-run 2: 485283 -> 483435 (0.4% slower)
Re-run 3: 483647 -> 486109 (0.5% faster)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9058
Test Plan:
unit tests included (table_test, db_properties_test, salt in env_test). General DB tests
and crash test updated to test new format_version.
Also temporarily updated the default format version to 6 and saw some test failures. Almost all
were due to an inadvertent additional read in VerifyChecksum to verify the index block checksum,
though it's arguably a bug that VerifyChecksum does not appear to (re-)verify the index block
checksum, just assuming it was verified in opening the index reader (probably *usually* true but
probably not always true). Some other concerns about VerifyChecksum are left in FIXME
comments. The only remaining test failure on change of default (in block_fetcher_test) now
has a comment about how to upgrade the test.
The format compatibility test does not need updating because we have not updated the default
format_version.
Reviewed By: ajkr, mrambacher
Differential Revision: D33100915
Pulled By: pdillinger
fbshipit-source-id: 8679e3e572fa580181a737fd6d113ed53c5422ee
2023-07-30 23:40:01 +00:00
|
|
|
io_status_ = status_to_io_status(
|
|
|
|
VerifyBlockChecksum(footer_, slice_.data(), block_size_,
|
|
|
|
file_->file_name(), handle_.offset()));
|
2022-03-29 18:54:54 +00:00
|
|
|
RecordTick(ioptions_.stats, BLOCK_CHECKSUM_COMPUTE_COUNT);
|
2023-05-13 01:16:11 +00:00
|
|
|
if (!io_status_.ok()) {
|
|
|
|
assert(io_status_.IsCorruption());
|
|
|
|
RecordTick(ioptions_.stats, BLOCK_CHECKSUM_MISMATCH_COUNT);
|
|
|
|
}
|
Improve / clean up meta block code & integrity (#9163)
Summary:
* Checksums are now checked on meta blocks unless specifically
suppressed or not applicable (e.g. plain table). (Was other way around.)
This means a number of cases that were not checking checksums now are,
including direct read TableProperties in Version::GetTableProperties
(fixed in meta_blocks ReadTableProperties), reading any block from
PersistentCache (fixed in BlockFetcher), read TableProperties in
SstFileDumper (ldb/sst_dump/BackupEngine) before table reader open,
maybe more.
* For that to work, I moved the global_seqno+TableProperties checksum
logic to the shared table/ code, because that is used by many utilies
such as SstFileDumper.
* Also for that to work, we have to know when we're dealing with a block
that has a checksum (trailer), so added that capability to Footer based
on magic number, and from there BlockFetcher.
* Knowledge of trailer presence has also fixed a problem where other
table formats were reading blocks including bytes for a non-existant
trailer--and awkwardly kind-of not using them, e.g. no shared code
checking checksums. (BlockFetcher compression type was populated
incorrectly.) Now we only read what is needed.
* Minimized code duplication and differing/incompatible/awkward
abstractions in meta_blocks.{cc,h} (e.g. SeekTo in metaindex block
without parsing block handle)
* Moved some meta block handling code from table_properties*.*
* Moved some code specific to block-based table from shared table/ code
to BlockBasedTable class. The checksum stuff means we can't completely
separate it, but things that don't need to be in shared table/ code
should not be.
* Use unique_ptr rather than raw ptr in more places. (Note: you can
std::move from unique_ptr to shared_ptr.)
Without enhancements to GetPropertiesOfAllTablesTest (see below),
net reduction of roughly 100 lines of code.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9163
Test Plan:
existing tests and
* Enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to verify that
checksums are now checked on direct read of table properties by TableCache
(new test would fail before this change)
* Also enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to test
putting table properties under old meta name
* Also generally enhanced that same test to actually test what it was
supposed to be testing already, by kicking things out of table cache when
we don't want them there.
Reviewed By: ajkr, mrambacher
Differential Revision: D32514757
Pulled By: pdillinger
fbshipit-source-id: 507964b9311d186ae8d1131182290cbd97a99fa9
2021-11-18 19:42:12 +00:00
|
|
|
}
|
|
|
|
compression_type_ =
|
|
|
|
BlockBasedTable::GetBlockCompressionType(slice_.data(), block_size_);
|
|
|
|
} else {
|
|
|
|
// E.g. plain table or cuckoo table
|
|
|
|
compression_type_ = kNoCompression;
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-27 23:13:08 +00:00
|
|
|
inline bool BlockFetcher::TryGetUncompressBlockFromPersistentCache() {
|
2017-12-11 23:16:37 +00:00
|
|
|
if (cache_options_.persistent_cache &&
|
|
|
|
!cache_options_.persistent_cache->IsCompressed()) {
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
Status status = PersistentCacheHelper::LookupUncompressed(
|
2017-12-11 23:16:37 +00:00
|
|
|
cache_options_, handle_, contents_);
|
|
|
|
if (status.ok()) {
|
|
|
|
// uncompressed page is found for the block handle
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
// uncompressed page is not found
|
2021-04-26 19:43:02 +00:00
|
|
|
if (ioptions_.logger && !status.IsNotFound()) {
|
2017-12-11 23:16:37 +00:00
|
|
|
assert(!status.ok());
|
2021-04-26 19:43:02 +00:00
|
|
|
ROCKS_LOG_INFO(ioptions_.logger,
|
2017-12-11 23:16:37 +00:00
|
|
|
"Error reading from persistent cache. %s",
|
|
|
|
status.ToString().c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-27 23:13:08 +00:00
|
|
|
// Attempt to satisfy this fetch from the prefetch buffer, if one was
// provided. Returns true when the caller should NOT read from the file:
// either the block was obtained here (`got_from_prefetch_buffer_` set and
// `used_buf_`/`slice_` pointing at the buffered data), or an error was
// encountered (`io_status_` set to the failure). Returns false when the
// prefetch buffer is absent or simply does not cover this block.
inline bool BlockFetcher::TryGetFromPrefetchBuffer() {
  if (prefetch_buffer_ != nullptr) {
    IOOptions opts;
    IOStatus io_s = file_->PrepareIOOptions(read_options_, opts);
    if (io_s.ok()) {
      // Ask the prefetch buffer for the full block including trailer; on
      // success `slice_` points into the buffer's memory.
      bool read_from_prefetch_buffer = prefetch_buffer_->TryReadFromCache(
          opts, file_, handle_.offset(), block_size_with_trailer_, &slice_,
          &io_s, for_compaction_);
      if (read_from_prefetch_buffer) {
        // Verify checksum / extract compression type; sets `io_status_`.
        ProcessTrailerIfPresent();
        if (io_status_.ok()) {
          got_from_prefetch_buffer_ = true;
          used_buf_ = const_cast<char*>(slice_.data());
        } else if (io_status_.IsCorruption()) {
          // Returning true apparently indicates we either got some data from
          // the prefetch buffer, or we tried and encountered an error.
          return true;
        }
      }
    }
    // Propagate any I/O error from preparing options or from the buffer read
    // itself; `io_status_` tells the caller why we stopped.
    if (!io_s.ok()) {
      io_status_ = io_s;
      return true;
    }
  }
  return got_from_prefetch_buffer_;
}
|
|
|
|
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
inline bool BlockFetcher::TryGetSerializedBlockFromPersistentCache() {
|
2017-12-11 23:16:37 +00:00
|
|
|
if (cache_options_.persistent_cache &&
|
|
|
|
cache_options_.persistent_cache->IsCompressed()) {
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
std::unique_ptr<char[]> buf;
|
|
|
|
io_status_ = status_to_io_status(PersistentCacheHelper::LookupSerialized(
|
|
|
|
cache_options_, handle_, &buf, block_size_with_trailer_));
|
2021-04-01 17:06:55 +00:00
|
|
|
if (io_status_.ok()) {
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
heap_buf_ = CacheAllocationPtr(buf.release());
|
2017-12-11 23:16:37 +00:00
|
|
|
used_buf_ = heap_buf_.get();
|
|
|
|
slice_ = Slice(heap_buf_.get(), block_size_);
|
Improve / clean up meta block code & integrity (#9163)
Summary:
* Checksums are now checked on meta blocks unless specifically
suppressed or not applicable (e.g. plain table). (Was other way around.)
This means a number of cases that were not checking checksums now are,
including direct read TableProperties in Version::GetTableProperties
(fixed in meta_blocks ReadTableProperties), reading any block from
PersistentCache (fixed in BlockFetcher), read TableProperties in
SstFileDumper (ldb/sst_dump/BackupEngine) before table reader open,
maybe more.
* For that to work, I moved the global_seqno+TableProperties checksum
logic to the shared table/ code, because that is used by many utilies
such as SstFileDumper.
* Also for that to work, we have to know when we're dealing with a block
that has a checksum (trailer), so added that capability to Footer based
on magic number, and from there BlockFetcher.
* Knowledge of trailer presence has also fixed a problem where other
table formats were reading blocks including bytes for a non-existant
trailer--and awkwardly kind-of not using them, e.g. no shared code
checking checksums. (BlockFetcher compression type was populated
incorrectly.) Now we only read what is needed.
* Minimized code duplication and differing/incompatible/awkward
abstractions in meta_blocks.{cc,h} (e.g. SeekTo in metaindex block
without parsing block handle)
* Moved some meta block handling code from table_properties*.*
* Moved some code specific to block-based table from shared table/ code
to BlockBasedTable class. The checksum stuff means we can't completely
separate it, but things that don't need to be in shared table/ code
should not be.
* Use unique_ptr rather than raw ptr in more places. (Note: you can
std::move from unique_ptr to shared_ptr.)
Without enhancements to GetPropertiesOfAllTablesTest (see below),
net reduction of roughly 100 lines of code.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9163
Test Plan:
existing tests and
* Enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to verify that
checksums are now checked on direct read of table properties by TableCache
(new test would fail before this change)
* Also enhanced DBTablePropertiesTest.GetPropertiesOfAllTablesTest to test
putting table properties under old meta name
* Also generally enhanced that same test to actually test what it was
supposed to be testing already, by kicking things out of table cache when
we don't want them there.
Reviewed By: ajkr, mrambacher
Differential Revision: D32514757
Pulled By: pdillinger
fbshipit-source-id: 507964b9311d186ae8d1131182290cbd97a99fa9
2021-11-18 19:42:12 +00:00
|
|
|
ProcessTrailerIfPresent();
|
2017-12-11 23:16:37 +00:00
|
|
|
return true;
|
2021-04-26 19:43:02 +00:00
|
|
|
} else if (!io_status_.IsNotFound() && ioptions_.logger) {
|
2021-04-01 17:06:55 +00:00
|
|
|
assert(!io_status_.ok());
|
2021-04-26 19:43:02 +00:00
|
|
|
ROCKS_LOG_INFO(ioptions_.logger,
|
2017-12-11 23:16:37 +00:00
|
|
|
"Error reading from persistent cache. %s",
|
2021-04-01 17:06:55 +00:00
|
|
|
io_status_.ToString().c_str());
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-27 23:13:08 +00:00
|
|
|
inline void BlockFetcher::PrepareBufferForBlockFromFile() {
|
2017-12-11 23:16:37 +00:00
|
|
|
// cache miss read from device
|
2020-06-30 22:38:59 +00:00
|
|
|
if ((do_uncompress_ || ioptions_.allow_mmap_reads) &&
|
2020-04-24 22:30:12 +00:00
|
|
|
block_size_with_trailer_ < kDefaultStackBufferSize) {
|
2022-06-22 22:45:21 +00:00
|
|
|
// If we've got a small enough chunk of data, read it in to the
|
2017-12-11 23:16:37 +00:00
|
|
|
// trivially allocated stack buffer instead of needing a full malloc()
|
2020-06-30 22:38:59 +00:00
|
|
|
//
|
|
|
|
// `GetBlockContents()` cannot return this data as its lifetime is tied to
|
|
|
|
// this `BlockFetcher`'s lifetime. That is fine because this is only used
|
|
|
|
// in cases where we do not expect the `GetBlockContents()` result to be the
|
|
|
|
// same buffer we are assigning here. If we guess incorrectly, there will be
|
|
|
|
// a heap allocation and memcpy in `GetBlockContents()` to obtain the final
|
|
|
|
// result. Considering we are eliding a heap allocation here by using the
|
|
|
|
// stack buffer, the cost of guessing incorrectly here is one extra memcpy.
|
|
|
|
//
|
|
|
|
// When `do_uncompress_` is true, we expect the uncompression step will
|
|
|
|
// allocate heap memory for the final result. However this expectation will
|
|
|
|
// be wrong if the block turns out to already be uncompressed, which we
|
|
|
|
// won't know for sure until after reading it.
|
|
|
|
//
|
|
|
|
// When `ioptions_.allow_mmap_reads` is true, we do not expect the file
|
|
|
|
// reader to use the scratch buffer at all, but instead return a pointer
|
|
|
|
// into the mapped memory. This expectation will be wrong when using a
|
|
|
|
// file reader that does not implement mmap reads properly.
|
2017-12-11 23:16:37 +00:00
|
|
|
used_buf_ = &stack_buf_[0];
|
2018-11-29 01:58:08 +00:00
|
|
|
} else if (maybe_compressed_ && !do_uncompress_) {
|
2022-10-25 18:50:38 +00:00
|
|
|
compressed_buf_ =
|
|
|
|
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
|
2018-11-29 01:58:08 +00:00
|
|
|
used_buf_ = compressed_buf_.get();
|
2017-12-11 23:16:37 +00:00
|
|
|
} else {
|
2022-10-25 18:50:38 +00:00
|
|
|
heap_buf_ = AllocateBlock(block_size_with_trailer_, memory_allocator_);
|
2017-12-11 23:16:37 +00:00
|
|
|
used_buf_ = heap_buf_.get();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-27 23:13:08 +00:00
|
|
|
inline void BlockFetcher::InsertCompressedBlockToPersistentCacheIfNeeded() {
|
2021-04-01 17:06:55 +00:00
|
|
|
if (io_status_.ok() && read_options_.fill_cache &&
|
2017-12-11 23:16:37 +00:00
|
|
|
cache_options_.persistent_cache &&
|
|
|
|
cache_options_.persistent_cache->IsCompressed()) {
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
PersistentCacheHelper::InsertSerialized(cache_options_, handle_, used_buf_,
|
|
|
|
block_size_with_trailer_);
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-27 23:13:08 +00:00
|
|
|
inline void BlockFetcher::InsertUncompressedBlockToPersistentCacheIfNeeded() {
|
2021-04-01 17:06:55 +00:00
|
|
|
if (io_status_.ok() && !got_from_prefetch_buffer_ &&
|
|
|
|
read_options_.fill_cache && cache_options_.persistent_cache &&
|
2017-12-11 23:16:37 +00:00
|
|
|
!cache_options_.persistent_cache->IsCompressed()) {
|
|
|
|
// insert to uncompressed cache
|
Refactor to avoid confusing "raw block" (#10408)
Summary:
We have a lot of confusing code because of mixed, sometimes
completely opposite uses of of the term "raw block" or "raw contents",
sometimes within the same source file. For example, in `BlockBasedTableBuilder`,
`raw_block_contents` and `raw_size` generally referred to uncompressed block
contents and size, while `WriteRawBlock` referred to writing a block that
is already compressed if it is going to be. Meanwhile, in
`BlockBasedTable`, `raw_block_contents` either referred to a (maybe
compressed) block with trailer, or a maybe compressed block maybe
without trailer. (Note: left as follow-up work to use C++ typing to
better sort out the various kinds of BlockContents.)
This change primarily tries to apply some consistent terminology around
the kinds of block representations, avoiding the unclear "raw". (Any
meaning of "raw" assumes some bias toward the storage layer or toward
the logical data layer.) Preferred terminology:
* **Serialized block** - bytes that go into storage. For block-based table
(usually the case) this includes the block trailer. WART: block `size` may or
may not include the trailer; need to be clear about whether it does or not.
* **Maybe compressed block** - like a serialized block, but without the
trailer (or no promise of including a trailer). Must be accompanied by a
CompressionType.
* **Uncompressed block** - "payload" bytes that are either stored with no
compression, used as input to compression function, or result of
decompression function.
* **Parsed block** - an in-memory form of a block in block cache, as it is
used by the table reader. Different C++ types are used depending on the
block type (see block_like_traits.h).
Other refactorings:
* Misc corrections/improvements of internal API comments
* Remove a few misleading / unhelpful / redundant comments.
* Use move semantics in some places to simplify contracts
* Use better parameter names to indicate which parameters are used for
outputs
* Remove some extraneous `extern`
* Various clean-ups to `CacheDumperImpl` (mostly unnecessary code)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10408
Test Plan: existing tests
Reviewed By: akankshamahajan15
Differential Revision: D38172617
Pulled By: pdillinger
fbshipit-source-id: ccb99299f324ac5ca46996d34c5089621a4f260c
2022-09-22 18:25:32 +00:00
|
|
|
PersistentCacheHelper::InsertUncompressed(cache_options_, handle_,
|
|
|
|
*contents_);
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-24 22:30:12 +00:00
|
|
|
inline void BlockFetcher::CopyBufferToHeapBuf() {
|
2018-11-29 01:58:08 +00:00
|
|
|
assert(used_buf_ != heap_buf_.get());
|
2020-04-24 22:30:12 +00:00
|
|
|
heap_buf_ = AllocateBlock(block_size_with_trailer_, memory_allocator_);
|
|
|
|
memcpy(heap_buf_.get(), used_buf_, block_size_with_trailer_);
|
|
|
|
#ifndef NDEBUG
|
|
|
|
num_heap_buf_memcpy_++;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void BlockFetcher::CopyBufferToCompressedBuf() {
|
|
|
|
assert(used_buf_ != compressed_buf_.get());
|
2022-10-25 18:50:38 +00:00
|
|
|
compressed_buf_ =
|
|
|
|
AllocateBlock(block_size_with_trailer_, memory_allocator_compressed_);
|
2020-04-24 22:30:12 +00:00
|
|
|
memcpy(compressed_buf_.get(), used_buf_, block_size_with_trailer_);
|
|
|
|
#ifndef NDEBUG
|
|
|
|
num_compressed_buf_memcpy_++;
|
|
|
|
#endif
|
2018-11-29 01:58:08 +00:00
|
|
|
}
|
|
|
|
|
2024-01-29 23:08:20 +00:00
|
|
|
// Before - Entering this method means the block is uncompressed or do not need
|
|
|
|
// to be uncompressed.
|
|
|
|
//
|
|
|
|
// The block can be in one of the following buffers:
|
2020-04-24 22:30:12 +00:00
|
|
|
// 1. prefetch buffer if prefetch is enabled and the block is prefetched before
|
|
|
|
// 2. stack_buf_ if block size is smaller than the stack_buf_ size and block
|
|
|
|
// is not compressed
|
|
|
|
// 3. heap_buf_ if the block is not compressed
|
|
|
|
// 4. compressed_buf_ if the block is compressed
|
2024-01-29 23:08:20 +00:00
|
|
|
// 5. direct_io_buf_ if direct IO is enabled or
|
|
|
|
// 6. underlying file_system scratch is used (FSReadRequest.fs_scratch).
|
|
|
|
//
|
|
|
|
// After - After this method, if the block is compressed, it should be in
|
|
|
|
// compressed_buf_ and heap_buf_ points to compressed_buf_, otherwise should be
|
|
|
|
// in heap_buf_.
|
2019-03-27 23:13:08 +00:00
|
|
|
inline void BlockFetcher::GetBlockContents() {
  // Materialize *contents_ from wherever the block bytes currently live
  // (prefetch buffer, stack_buf_, heap_buf_, compressed_buf_, direct-IO
  // buffer, or file-system scratch). See the "Before"/"After" contract in
  // the comment block above this function.
  if (slice_.data() != used_buf_) {
    // the slice content is not the buffer provided
    *contents_ = BlockContents(Slice(slice_.data(), block_size_));
  } else {
    // page can be either uncompressed or compressed, the buffer either stack
    // or heap provided. Refer to https://github.com/facebook/rocksdb/pull/4096
    if (got_from_prefetch_buffer_ || used_buf_ == &stack_buf_[0]) {
      // Data lives in a buffer we do not own (prefetch) or that dies with
      // this fetcher (stack) — copy it onto the heap.
      CopyBufferToHeapBuf();
    } else if (used_buf_ == compressed_buf_.get()) {
      if (compression_type_ == kNoCompression &&
          memory_allocator_ != memory_allocator_compressed_) {
        // Uncompressed block sitting in the compressed-allocator buffer:
        // re-copy so the result is owned by memory_allocator_.
        CopyBufferToHeapBuf();
      } else {
        // Hand the compressed buffer over wholesale; compressed_buf_ is
        // empty afterwards.
        heap_buf_ = std::move(compressed_buf_);
      }
    } else if (direct_io_buf_.get() != nullptr || use_fs_scratch_) {
      // Direct IO / FS-scratch buffers are not ours to keep — copy out.
      if (compression_type_ == kNoCompression) {
        CopyBufferToHeapBuf();
      } else {
        CopyBufferToCompressedBuf();
        heap_buf_ = std::move(compressed_buf_);
      }
    }
    *contents_ = BlockContents(std::move(heap_buf_), block_size_);
  }
#ifndef NDEBUG
  // Debug-only: record whether the serialized form carried a trailer.
  contents_->has_trailer = footer_.GetBlockTrailerSize() > 0;
#endif
}
|
|
|
|
|
2024-03-18 23:16:05 +00:00
|
|
|
// Read a block from the file and verify its checksum. Upon return, io_status_
|
|
|
|
// will be updated with the status of the read, and slice_ will be updated
|
|
|
|
// with a pointer to the data.
|
2024-05-30 19:33:58 +00:00
|
|
|
// See the comment block above: reads the block from the file and verifies its
// checksum; io_status_ and slice_ are updated accordingly. `retry` indicates
// this is a re-read after a corruption, and asks the FS to
// verify-and-reconstruct.
void BlockFetcher::ReadBlock(bool retry) {
  FSReadRequest read_req;
  IOOptions opts;
  io_status_ = file_->PrepareIOOptions(read_options_, opts);
  opts.verify_and_reconstruct_read = retry;
  // read_req.status is only consulted on the use_fs_scratch_ path; mark it
  // checked so the other paths don't trip the unchecked-status assertion.
  read_req.status.PermitUncheckedError();
  // Actual file read
  if (io_status_.ok()) {
    if (file_->use_direct_io()) {
      // Direct IO: the reader returns an aligned buffer it owns
      // (direct_io_buf_); slice_ points into it.
      PERF_TIMER_GUARD(block_read_time);
      PERF_CPU_TIMER_GUARD(
          block_read_cpu_time,
          ioptions_.env ? ioptions_.env->GetSystemClock().get() : nullptr);
      io_status_ = file_->Read(opts, handle_.offset(), block_size_with_trailer_,
                               &slice_, /*scratch=*/nullptr, &direct_io_buf_);
      PERF_COUNTER_ADD(block_read_count, 1);
      used_buf_ = const_cast<char*>(slice_.data());
    } else if (use_fs_scratch_) {
      // File system provides the buffer (FSReadRequest.fs_scratch); issue a
      // single-request MultiRead with no scratch of our own.
      PERF_TIMER_GUARD(block_read_time);
      PERF_CPU_TIMER_GUARD(
          block_read_cpu_time,
          ioptions_.env ? ioptions_.env->GetSystemClock().get() : nullptr);
      read_req.offset = handle_.offset();
      read_req.len = block_size_with_trailer_;
      read_req.scratch = nullptr;
      io_status_ = file_->MultiRead(opts, &read_req, /*num_reqs=*/1,
                                    /*AlignedBuf* =*/nullptr);
      PERF_COUNTER_ADD(block_read_count, 1);

      slice_ = Slice(read_req.result.data(), read_req.result.size());
      used_buf_ = const_cast<char*>(slice_.data());
    } else {
      // It allocates/assign used_buf_
      PrepareBufferForBlockFromFile();

      PERF_TIMER_GUARD(block_read_time);
      PERF_CPU_TIMER_GUARD(
          block_read_cpu_time,
          ioptions_.env ? ioptions_.env->GetSystemClock().get() : nullptr);

      io_status_ = file_->Read(
          opts, handle_.offset(), /*size*/ block_size_with_trailer_,
          /*result*/ &slice_, /*scratch*/ used_buf_, /*aligned_buf=*/nullptr);
      PERF_COUNTER_ADD(block_read_count, 1);
#ifndef NDEBUG
      // Debug-only: classify which buffer the reader actually filled.
      if (slice_.data() == &stack_buf_[0]) {
        num_stack_buf_memcpy_++;
      } else if (slice_.data() == heap_buf_.get()) {
        num_heap_buf_memcpy_++;
      } else if (slice_.data() == compressed_buf_.get()) {
        num_compressed_buf_memcpy_++;
      }
#endif
    }
  }

  // Per-block-type read counters.
  // TODO: introduce dedicated perf counter for range tombstones
  switch (block_type_) {
    case BlockType::kFilter:
    case BlockType::kFilterPartitionIndex:
      PERF_COUNTER_ADD(filter_block_read_count, 1);
      break;

    case BlockType::kCompressionDictionary:
      PERF_COUNTER_ADD(compression_dict_block_read_count, 1);
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(index_block_read_count, 1);
      break;

    // Nothing to do here as we don't have counters for the other types.
    default:
      break;
  }

  PERF_COUNTER_ADD(block_read_byte, block_size_with_trailer_);
  if (io_status_.ok()) {
    // MultiRead reports per-request status separately; surface it. Otherwise
    // treat a short read as corruption.
    if (use_fs_scratch_ && !read_req.status.ok()) {
      io_status_ = read_req.status;
    } else if (slice_.size() != block_size_with_trailer_) {
      io_status_ = IOStatus::Corruption(
          "truncated block read from " + file_->file_name() + " offset " +
          std::to_string(handle_.offset()) + ", expected " +
          std::to_string(block_size_with_trailer_) + " bytes, got " +
          std::to_string(slice_.size()));
    }
  }

  if (io_status_.ok()) {
    // Verify checksum / decode trailer when the format has one.
    ProcessTrailerIfPresent();
  }

  if (retry) {
    RecordTick(ioptions_.stats, FILE_READ_CORRUPTION_RETRY_COUNT);
  }
  if (io_status_.ok()) {
    InsertCompressedBlockToPersistentCacheIfNeeded();
    // Take ownership of the FS-provided scratch so it outlives this call.
    fs_buf_ = std::move(read_req.fs_scratch);
    if (retry) {
      RecordTick(ioptions_.stats, FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT);
    }
  } else {
    // On failure, release every buffer so a retry starts from a clean slate.
    ReleaseFileSystemProvidedBuffer(&read_req);
    direct_io_buf_.reset();
    compressed_buf_.reset();
    heap_buf_.reset();
    used_buf_ = nullptr;
  }
}
|
|
|
|
|
|
|
|
// Obtain the block contents, trying (in order) the uncompressed persistent
// cache, the prefetch buffer, the serialized persistent cache, and finally a
// file read; then uncompress if requested. Returns io_status_.
IOStatus BlockFetcher::ReadBlockContents() {
  if (TryGetUncompressBlockFromPersistentCache()) {
    compression_type_ = kNoCompression;
#ifndef NDEBUG
    contents_->has_trailer = footer_.GetBlockTrailerSize() > 0;
#endif  // NDEBUG
    return IOStatus::OK();
  }
  if (TryGetFromPrefetchBuffer()) {
    // Prefetched data can still be corrupt; fall back to a verifying re-read
    // when the file system supports it.
    if (io_status_.IsCorruption() && retry_corrupt_read_) {
      ReadBlock(/*retry=*/true);
    }
    if (!io_status_.ok()) {
      // A failed ReadBlock releases the FS scratch buffer.
      assert(!fs_buf_);
      return io_status_;
    }
  } else if (!TryGetSerializedBlockFromPersistentCache()) {
    ReadBlock(/*retry =*/false);
    // If the file system supports retry after corruption, then try to
    // re-read the block and see if it succeeds.
    if (io_status_.IsCorruption() && retry_corrupt_read_) {
      assert(!fs_buf_);
      ReadBlock(/*retry=*/true);
    }
    if (!io_status_.ok()) {
      assert(!fs_buf_);
      return io_status_;
    }
  }

  if (do_uncompress_ && compression_type_ != kNoCompression) {
    PERF_TIMER_GUARD(block_decompress_time);
    // compressed page, uncompress, update cache
    UncompressionContext context(compression_type_);
    UncompressionInfo info(context, uncompression_dict_, compression_type_);
    io_status_ = status_to_io_status(UncompressSerializedBlock(
        info, slice_.data(), block_size_, contents_, footer_.format_version(),
        ioptions_, memory_allocator_));
#ifndef NDEBUG
    num_heap_buf_memcpy_++;
#endif
    // Save the compressed block without trailer
    slice_ = Slice(slice_.data(), block_size_);
  } else {
    GetBlockContents();
    // Contents now own the data; clear the view.
    slice_ = Slice();
  }

  InsertUncompressedBlockToPersistentCacheIfNeeded();

  return io_status_;
}
|
|
|
|
|
2022-05-20 23:09:33 +00:00
|
|
|
// Asynchronous variant of the block read path. Probes the persistent cache
// first (uncompressed, then serialized form); on a miss, issues an async
// prefetch through prefetch_buffer_ and, when the data is already available,
// verifies/uncompresses it in place. Falls back to the synchronous
// ReadBlockContents() when async prefetch is not applicable (compaction
// reads) or fails with a non-retryable error.
// Returns OK, a TryAgain status to be retried by the caller, or the final
// read/verify status in io_status_.
IOStatus BlockFetcher::ReadAsyncBlockContents() {
  if (TryGetUncompressBlockFromPersistentCache()) {
    // Persistent cache stored the uncompressed payload; nothing to decode.
    compression_type_ = kNoCompression;
#ifndef NDEBUG
    contents_->has_trailer = footer_.GetBlockTrailerSize() > 0;
#endif  // NDEBUG
    return IOStatus::OK();
  } else if (!TryGetSerializedBlockFromPersistentCache()) {
    // Full miss: the async path requires a prefetch buffer.
    assert(prefetch_buffer_ != nullptr);
    if (!for_compaction_) {
      IOOptions opts;
      IOStatus io_s = file_->PrepareIOOptions(read_options_, opts);
      if (!io_s.ok()) {
        return io_s;
      }
      // Kick off (or poll) the async read of the block plus its trailer.
      io_s = status_to_io_status(prefetch_buffer_->PrefetchAsync(
          opts, file_, handle_.offset(), block_size_with_trailer_, &slice_));
      if (io_s.IsTryAgain()) {
        // Data not ready yet; caller is expected to retry.
        return io_s;
      }
      if (io_s.ok()) {
        // Data Block is already in prefetch.
        got_from_prefetch_buffer_ = true;
        // Checksum/trailer verification; sets io_status_ and
        // compression_type_ as a side effect.
        ProcessTrailerIfPresent();
        if (io_status_.IsCorruption() && retry_corrupt_read_) {
          // Prefetched copy is corrupt: discard it and re-read
          // synchronously from the file.
          got_from_prefetch_buffer_ = false;
          ReadBlock(/*retry = */ true);
        }
        if (!io_status_.ok()) {
          assert(!fs_buf_);
          return io_status_;
        }
        used_buf_ = const_cast<char*>(slice_.data());

        if (do_uncompress_ && compression_type_ != kNoCompression) {
          PERF_TIMER_GUARD(block_decompress_time);
          // compressed page, uncompress, update cache
          UncompressionContext context(compression_type_);
          UncompressionInfo info(context, uncompression_dict_,
                                 compression_type_);
          io_status_ = status_to_io_status(UncompressSerializedBlock(
              info, slice_.data(), block_size_, contents_,
              footer_.format_version(), ioptions_, memory_allocator_));
#ifndef NDEBUG
          // Debug-only accounting of heap-buffer copies.
          num_heap_buf_memcpy_++;
#endif
        } else {
          // No decompression needed (or requested): wrap the bytes directly.
          GetBlockContents();
        }
        // Best-effort insertion into the persistent cache for future reads.
        InsertUncompressedBlockToPersistentCacheIfNeeded();
        return io_status_;
      }
    }
    // Fallback to sequential reading of data blocks in case io_s returns
    // error or for_compaction_ is true.
    return ReadBlockContents();
  }
  // Serialized block was found in the persistent cache; io_status_ was set
  // by TryGetSerializedBlockFromPersistentCache().
  return io_status_;
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|