// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include "file/file_util.h"
#include "memory/memory_allocator_impl.h"
#include "table/block_based/block.h"
#include "table/block_based/block_type.h"
#include "table/format.h"
#include "table/persistent_cache_options.h"

namespace ROCKSDB_NAMESPACE {

// Retrieves a single block of a given file. Utilizes the prefetch buffer and/or
// persistent cache provided (if any) to try to avoid reading from the file
// directly. Note that both the prefetch buffer and the persistent cache are
// optional; also, note that the persistent cache may be configured to store
// either compressed or uncompressed blocks.
//
// If the retrieved block is compressed and the do_uncompress flag is set,
// BlockFetcher uncompresses the block (using the uncompression dictionary,
// if provided, to prime the compression algorithm), and returns the resulting
// uncompressed block data. Otherwise, it returns the original block.
//
// Two read options affect the behavior of BlockFetcher: if verify_checksums is
// true, the checksum of the (original) block is checked; if fill_cache is true,
// the block is added to the persistent cache if needed.
//
// Memory for uncompressed and compressed blocks is allocated as needed
// using memory_allocator and memory_allocator_compressed, respectively
// (if provided; otherwise, the default allocator is used).
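//
// Illustrative usage sketch (a hypothetical call site, not copied from the
// table reader; argument values are placeholders):
//
//   BlockContents contents;
//   BlockFetcher fetcher(file, prefetch_buffer, footer, read_options, handle,
//                        &contents, ioptions, /*do_uncompress=*/true,
//                        /*maybe_compressed=*/true, BlockType::kData,
//                        uncompression_dict, cache_options);
//   IOStatus ios = fetcher.ReadBlockContents();
//   if (ios.ok()) {
//     // contents now holds the block payload (uncompressed here, since
//     // do_uncompress was set).
//   }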
class BlockFetcher {
 public:
  BlockFetcher(RandomAccessFileReader* file,
               FilePrefetchBuffer* prefetch_buffer,
               const Footer& footer /* ref retained */,
               const ReadOptions& read_options,
               const BlockHandle& handle /* ref retained */,
               BlockContents* contents,
               const ImmutableOptions& ioptions /* ref retained */,
               bool do_uncompress, bool maybe_compressed, BlockType block_type,
               const UncompressionDict& uncompression_dict /* ref retained */,
               const PersistentCacheOptions& cache_options /* ref retained */,
               MemoryAllocator* memory_allocator = nullptr,
               MemoryAllocator* memory_allocator_compressed = nullptr,
               bool for_compaction = false)
      : file_(file),
        prefetch_buffer_(prefetch_buffer),
        footer_(footer),
        read_options_(read_options),
        handle_(handle),
        contents_(contents),
        ioptions_(ioptions),
        do_uncompress_(do_uncompress),
        maybe_compressed_(maybe_compressed),
        block_type_(block_type),
        block_size_(static_cast<size_t>(handle_.size())),
        block_size_with_trailer_(block_size_ + footer.GetBlockTrailerSize()),
        uncompression_dict_(uncompression_dict),
        cache_options_(cache_options),
        memory_allocator_(memory_allocator),
        memory_allocator_compressed_(memory_allocator_compressed),
        for_compaction_(for_compaction) {
    io_status_.PermitUncheckedError();  // TODO(AR) can we improve on this?
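    // Opt in to reading into FileSystem-provided buffers when the FileSystem
    // supports handing out its own scratch buffers (FSSupportedOps::kFSBuffer),
    // which can avoid an extra copy into BlockFetcher's own buffers.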
    if (CheckFSFeatureSupport(ioptions_.fs.get(), FSSupportedOps::kFSBuffer)) {
      use_fs_scratch_ = true;
    }
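    // If the FileSystem can verify reads and reconstruct corrupted data
    // (FSSupportedOps::kVerifyAndReconstructRead), allow retrying a block
    // read that fails verification.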
    if (CheckFSFeatureSupport(ioptions_.fs.get(),
                              FSSupportedOps::kVerifyAndReconstructRead)) {
      retry_corrupt_read_ = true;
    }
  }

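  // Reads the block identified by handle_ into *contents_, consulting the
  // prefetch buffer and persistent cache (if provided) before falling back to
  // reading the file directly.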
  IOStatus ReadBlockContents();
  IOStatus ReadAsyncBlockContents();

  inline CompressionType get_compression_type() const {
    return compression_type_;
  }
  inline size_t GetBlockSizeWithTrailer() const {
    return block_size_with_trailer_;
  }
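  // Returns the compressed block contents; only meaningful when the block was
  // stored compressed (see the assert below). Callers can use this, for
  // example, to insert the compressed form of the block into a secondary
  // cache.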
  inline Slice& GetCompressedBlock() {
    assert(compression_type_ != kNoCompression);
    return slice_;
  }

#ifndef NDEBUG
  int TEST_GetNumStackBufMemcpy() const { return num_stack_buf_memcpy_; }
  int TEST_GetNumHeapBufMemcpy() const { return num_heap_buf_memcpy_; }
  int TEST_GetNumCompressedBufMemcpy() const {
    return num_compressed_buf_memcpy_;
  }

#endif
 private:
#ifndef NDEBUG
  int num_stack_buf_memcpy_ = 0;
  int num_heap_buf_memcpy_ = 0;
  int num_compressed_buf_memcpy_ = 0;

#endif
  static const uint32_t kDefaultStackBufferSize = 5000;

  RandomAccessFileReader* file_;
  FilePrefetchBuffer* prefetch_buffer_;
  const Footer& footer_;
  const ReadOptions read_options_;
  const BlockHandle& handle_;
  BlockContents* contents_;
  const ImmutableOptions& ioptions_;
  const bool do_uncompress_;
  const bool maybe_compressed_;
  const BlockType block_type_;
  const size_t block_size_;
  const size_t block_size_with_trailer_;
  const UncompressionDict& uncompression_dict_;
  const PersistentCacheOptions& cache_options_;
  MemoryAllocator* memory_allocator_;
  MemoryAllocator* memory_allocator_compressed_;
  IOStatus io_status_;
  Slice slice_;
  char* used_buf_ = nullptr;
  AlignedBuf direct_io_buf_;
  CacheAllocationPtr heap_buf_;
  CacheAllocationPtr compressed_buf_;
  char stack_buf_[kDefaultStackBufferSize];
  bool got_from_prefetch_buffer_ = false;
  CompressionType compression_type_;
  bool for_compaction_ = false;
  bool use_fs_scratch_ = false;
  bool retry_corrupt_read_ = false;

  // return true if found
  bool TryGetUncompressBlockFromPersistentCache();
  // return true if found
  bool TryGetFromPrefetchBuffer();
  bool TryGetSerializedBlockFromPersistentCache();
  void PrepareBufferForBlockFromFile();
  // Copy content from used_buf_ to new heap_buf_.
  void CopyBufferToHeapBuf();
  // Copy content from used_buf_ to new compressed_buf_.
  void CopyBufferToCompressedBuf();
  void GetBlockContents();
  void InsertCompressedBlockToPersistentCacheIfNeeded();
  void InsertUncompressedBlockToPersistentCacheIfNeeded();
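  // If the format includes a block trailer, verifies its checksum (when
  // checksum verification is enabled) and reads the compression type from it.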
  void ProcessTrailerIfPresent();
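  // Performs the actual read of the block from the file. `retry` is true when
  // re-reading after a corruption was detected (see retry_corrupt_read_);
  // `fs_buf` holds a FileSystem-allocated buffer when use_fs_scratch_ is set.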
  void ReadBlock(bool retry, FSAllocationPtr& fs_buf);

  void ReleaseFileSystemProvidedBuffer(FSReadRequest* read_req) {
    if (use_fs_scratch_) {
      // Free the scratch buffer allocated by FileSystem.
      if (read_req->fs_scratch != nullptr) {
        read_req->fs_scratch.reset();
        read_req->fs_scratch = nullptr;
      }
    }
  }
};

}  // namespace ROCKSDB_NAMESPACE