// Provenance (scrape metadata, kept as a comment):
// mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-26 16:30:56 +00:00
// commit 629605d645
// Summary: Explicitly call readahead() instead of relying on page-cache
// behavior for compaction reads, since compaction reads are sequential and
// readahead is most likely needed. Test: extended the existing UT to cover
// the compaction-read case.
// Pull Request resolved: https://github.com/facebook/rocksdb/pull/11631
// Reviewed By: ajkr  Differential Revision: D47681437  Pulled By: hx235
// fbshipit-source-id: 78792f64985c4dc44aa8f2a9c41ab3e8bbc0bc90
// 74 lines, 2.9 KiB, C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include "table/block_based/block_based_table_reader.h"

namespace ROCKSDB_NAMESPACE {

// Tracks the read pattern of one BlockBasedTable reader and owns the
// FilePrefetchBuffer used to issue readahead. Compaction reads use a fixed
// readahead size; non-compaction reads use an auto-tuned size that starts at
// initial_auto_readahead_size and is adjusted as sequential reads are seen.
class BlockPrefetcher {
 public:
  // compaction_readahead_size: readahead size used only for compaction reads
  //   (lookup_context_.caller == kCompaction).
  // initial_auto_readahead_size: starting value for the auto-tuned readahead
  //   used by non-compaction reads; also seeds readahead_size_.
  explicit BlockPrefetcher(size_t compaction_readahead_size,
                           size_t initial_auto_readahead_size)
      : compaction_readahead_size_(compaction_readahead_size),
        readahead_size_(initial_auto_readahead_size),
        initial_auto_readahead_size_(initial_auto_readahead_size) {}

  // Creates/updates prefetch_buffer_ and issues readahead for `handle` when
  // the access pattern warrants it (defined in the .cc file).
  void PrefetchIfNeeded(const BlockBasedTable::Rep* rep,
                        const BlockHandle& handle, size_t readahead_size,
                        bool is_for_compaction,
                        const bool no_sequential_checking,
                        Env::IOPriority rate_limiter_priority);

  // Non-owning accessor; may return nullptr before the first prefetch.
  FilePrefetchBuffer* prefetch_buffer() { return prefetch_buffer_.get(); }

  // Records the most recent read so the next call can test for sequentiality.
  void UpdateReadPattern(const uint64_t& offset, const size_t& len) {
    prev_offset_ = offset;
    prev_len_ = len;
  }

  // A read is sequential if it starts exactly where the previous one ended
  // (or if no previous read has been recorded yet).
  bool IsBlockSequential(const uint64_t& offset) {
    return (prev_len_ == 0 || (prev_offset_ + prev_len_ == offset));
  }

  // Resets the auto-readahead state back to its initial configuration.
  void ResetValues(size_t initial_auto_readahead_size) {
    num_file_reads_ = 1;
    // Since initial_auto_readahead_size_ can be different from
    // the value passed to BlockBasedTableOptions.initial_auto_readahead_size in
    // case of adaptive_readahead, so fallback the readahead_size_ to that value
    // in case of reset.
    initial_auto_readahead_size_ = initial_auto_readahead_size;
    readahead_size_ = initial_auto_readahead_size_;
    readahead_limit_ = 0;
  }

  // Adopts readahead state carried over from another file via
  // ReadaheadFileInfo (e.g. when iteration crosses file boundaries).
  void SetReadaheadState(ReadaheadFileInfo::ReadaheadInfo* readahead_info) {
    num_file_reads_ = readahead_info->num_file_reads;
    initial_auto_readahead_size_ = readahead_info->readahead_size;
    TEST_SYNC_POINT_CALLBACK("BlockPrefetcher::SetReadaheadState",
                             &initial_auto_readahead_size_);
  }

 private:
  // Readahead size used in compaction, its value is used only if
  // lookup_context_.caller = kCompaction.
  size_t compaction_readahead_size_;

  // readahead_size_ is used in non-compaction read if underlying FS supports
  // prefetching.
  size_t readahead_size_;
  size_t readahead_limit_ = 0;
  // initial_auto_readahead_size_ is used in non-compaction read if RocksDB uses
  // internal prefetch buffer.
  uint64_t initial_auto_readahead_size_;
  uint64_t num_file_reads_ = 0;
  uint64_t prev_offset_ = 0;
  size_t prev_len_ = 0;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer_;
};
}  // namespace ROCKSDB_NAMESPACE