2019-09-16 17:31:27 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "file/file_prefetch_buffer.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
|
|
|
|
#include "file/random_access_file_reader.h"
|
|
|
|
#include "monitoring/histogram.h"
|
|
|
|
#include "monitoring/iostats_context_imp.h"
|
|
|
|
#include "port/port.h"
|
|
|
|
#include "test_util/sync_point.h"
|
|
|
|
#include "util/random.h"
|
|
|
|
#include "util/rate_limiter.h"
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2022-03-21 14:12:43 +00:00
|
|
|
|
|
|
|
// Prepares bufs_[index] to hold `roundup_len` bytes of aligned data for a
// read whose (already rounded-down) start overlaps the buffer's current
// contents. Any usable tail of the existing buffer is retained; on return,
// `chunk_len` (out param) is the number of already-buffered bytes kept at the
// front of the buffer, so the caller reads only the remaining
// `roundup_len - chunk_len` bytes after them.
// `refit_tail` controls whether retained bytes may be moved to the front of
// the existing buffer in place (synchronous path) or must be copied into a
// freshly allocated buffer instead (asynchronous path).
void FilePrefetchBuffer::CalculateOffsetAndLen(size_t alignment,
                                               uint64_t offset,
                                               size_t roundup_len, size_t index,
                                               bool refit_tail,
                                               uint64_t& chunk_len) {
  uint64_t chunk_offset_in_buffer = 0;
  bool copy_data_to_new_buffer = false;
  // Check if requested bytes are in the existing buffer_.
  // If only a few bytes exist -- reuse them & read only what is really needed.
  //     This is typically the case of incremental reading of data.
  // If no bytes exist in buffer -- full pread.
  // Note: `offset <=` (not `<`) end-of-buffer also matches the exactly
  // contiguous case, where the retained chunk is empty but alignment math
  // below still applies.
  if (bufs_[index].buffer_.CurrentSize() > 0 &&
      offset >= bufs_[index].offset_ &&
      offset <= bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize()) {
    // Only a few requested bytes are in the buffer. memmove those chunk of
    // bytes to the beginning, and memcpy them back into the new buffer if a
    // new buffer is created.
    // Round the retained chunk's start down so the kept region stays aligned.
    chunk_offset_in_buffer = Rounddown(
        static_cast<size_t>(offset - bufs_[index].offset_), alignment);
    chunk_len = static_cast<uint64_t>(bufs_[index].buffer_.CurrentSize()) -
                chunk_offset_in_buffer;
    assert(chunk_offset_in_buffer % alignment == 0);
    // assert(chunk_len % alignment == 0);
    assert(chunk_offset_in_buffer + chunk_len <=
           bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize());
    if (chunk_len > 0) {
      copy_data_to_new_buffer = true;
    } else {
      // this reset is not necessary, but just to be safe.
      chunk_offset_in_buffer = 0;
    }
  }

  // Create a new buffer only if current capacity is not sufficient, and memcopy
  // bytes from old buffer if needed (i.e., if chunk_len is greater than 0).
  if (bufs_[index].buffer_.Capacity() < roundup_len) {
    bufs_[index].buffer_.Alignment(alignment);
    bufs_[index].buffer_.AllocateNewBuffer(
        static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
        chunk_offset_in_buffer, static_cast<size_t>(chunk_len));
  } else if (chunk_len > 0 && refit_tail) {
    // New buffer not needed. But memmove bytes from tail to the beginning since
    // chunk_len is greater than 0.
    bufs_[index].buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer),
                                   static_cast<size_t>(chunk_len));
  } else if (chunk_len > 0) {
    // For async prefetching, it doesn't call RefitTail with chunk_len > 0.
    // Allocate new buffer if needed because aligned buffer calculate remaining
    // buffer as capacity_ - cursize_ which might not be the case in this as we
    // are not refitting.
    // TODO akanksha: Update the condition when asynchronous prefetching is
    // stable.
    bufs_[index].buffer_.Alignment(alignment);
    bufs_[index].buffer_.AllocateNewBuffer(
        static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
        chunk_offset_in_buffer, static_cast<size_t>(chunk_len));
  }
}
|
|
|
|
|
|
|
|
// Issues a synchronous read of `read_len` bytes at file offset
// `rounddown_start + chunk_len` into bufs_[index], landing the data directly
// after the `chunk_len` bytes already retained at the front of the buffer.
// On success, the buffer's offset/size metadata is updated to cover the
// retained chunk plus however many bytes the reader actually returned.
Status FilePrefetchBuffer::Read(const IOOptions& opts,
                                RandomAccessFileReader* reader,
                                Env::IOPriority rate_limiter_priority,
                                uint64_t read_len, uint64_t chunk_len,
                                uint64_t rounddown_start, uint32_t index) {
  char* scratch = bufs_[index].buffer_.BufferStart() + chunk_len;
  uint64_t read_offset = rounddown_start + chunk_len;
  Slice read_result;
  Status io_s =
      reader->Read(opts, read_offset, read_len, &read_result, scratch,
                   /*aligned_buf=*/nullptr, rate_limiter_priority);
#ifndef NDEBUG
  if (read_result.size() < read_len) {
    // Fake an IO error to force db_stress fault injection to ignore
    // truncated read errors
    IGNORE_STATUS_IF_ERROR(Status::IOError());
  }
#endif
  if (!io_s.ok()) {
    return io_s;
  }

  // Record where the buffered region now starts and how much valid data it
  // holds: the retained chunk followed by the freshly read bytes.
  bufs_[index].offset_ = rounddown_start;
  bufs_[index].buffer_.Size(static_cast<size_t>(chunk_len) +
                            read_result.size());
  return io_s;
}
|
|
|
|
|
|
|
|
// Submits an asynchronous read of `read_len` bytes at file offset
// `rounddown_start + chunk_len` into bufs_[index], placing the data after the
// `chunk_len` bytes already retained at the front of the buffer. On
// successful submission, marks async_read_in_progress_ so later calls know a
// Poll is pending; completion is delivered via PrefetchAsyncCallback.
Status FilePrefetchBuffer::ReadAsync(const IOOptions& opts,
                                     RandomAccessFileReader* reader,
                                     uint64_t read_len, uint64_t chunk_len,
                                     uint64_t rounddown_start, uint32_t index) {
  // Completion callback for the async read request; forwards to the member
  // handler which fills in the buffer metadata.
  auto callback = [this](const FSReadRequest& request, void* cb_arg) {
    PrefetchAsyncCallback(request, cb_arg);
  };

  FSReadRequest req;
  req.len = read_len;
  req.offset = rounddown_start + chunk_len;
  req.result = Slice();
  req.scratch = bufs_[index].buffer_.BufferStart() + chunk_len;

  Status s = reader->ReadAsync(req, opts, callback,
                               /*cb_arg=*/nullptr, &io_handle_, &del_fn_,
                               /*aligned_buf=*/nullptr);
  // The per-request status is reported through the callback; explicitly
  // discharge the local copy's checked-status obligation.
  req.status.PermitUncheckedError();
  if (s.ok()) {
    async_read_in_progress_ = true;
  }
  return s;
}
|
|
|
|
|
2020-06-29 21:51:57 +00:00
|
|
|
// Synchronously prefetches `n` bytes starting at `offset` into the curr_
// buffer, with the read range rounded out to the file's required buffer
// alignment. Returns OK without doing any I/O when prefetching is disabled,
// `reader` is null, or the requested range is already fully buffered.
Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,
                                    RandomAccessFileReader* reader,
                                    uint64_t offset, size_t n,
                                    Env::IOPriority rate_limiter_priority) {
  if (!enable_ || reader == nullptr) {
    return Status::OK();
  }
  TEST_SYNC_POINT("FilePrefetchBuffer::Prefetch:Start");

  const uint64_t buffered_end =
      bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize();
  if (offset + n <= buffered_end) {
    // Everything requested already sits in curr_; no read needed.
    return Status::OK();
  }

  const size_t alignment = reader->file()->GetRequiredBufferAlignment();
  const size_t request_offset = static_cast<size_t>(offset);
  const uint64_t aligned_start = Rounddown(request_offset, alignment);
  const uint64_t aligned_end = Roundup(request_offset + n, alignment);
  const uint64_t aligned_len = aligned_end - aligned_start;
  assert(aligned_len >= alignment);
  assert(aligned_len % alignment == 0);

  // Reuse any overlapping bytes already in curr_; chunk_len reports how many
  // were retained so the read starts right after them.
  uint64_t chunk_len = 0;
  CalculateOffsetAndLen(alignment, offset, aligned_len, curr_,
                        true /*refit_tail*/, chunk_len);
  const size_t read_len = static_cast<size_t>(aligned_len - chunk_len);

  return Read(opts, reader, rate_limiter_priority, read_len, chunk_len,
              aligned_start, curr_);
}
|
|
|
|
|
|
|
|
// Copy data from src to third buffer.
|
|
|
|
void FilePrefetchBuffer::CopyDataToBuffer(uint32_t src, uint64_t& offset,
|
|
|
|
size_t& length) {
|
|
|
|
if (length == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
uint64_t copy_offset = (offset - bufs_[src].offset_);
|
|
|
|
size_t copy_len = 0;
|
|
|
|
if (offset + length <=
|
|
|
|
bufs_[src].offset_ + bufs_[src].buffer_.CurrentSize()) {
|
|
|
|
// All the bytes are in src.
|
|
|
|
copy_len = length;
|
|
|
|
} else {
|
|
|
|
copy_len = bufs_[src].buffer_.CurrentSize() - copy_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(bufs_[2].buffer_.BufferStart() + bufs_[2].buffer_.CurrentSize(),
|
|
|
|
bufs_[src].buffer_.BufferStart() + copy_offset, copy_len);
|
|
|
|
|
|
|
|
bufs_[2].buffer_.Size(bufs_[2].buffer_.CurrentSize() + copy_len);
|
|
|
|
|
|
|
|
// Update offset and length.
|
|
|
|
offset += copy_len;
|
|
|
|
length -= copy_len;
|
|
|
|
|
|
|
|
// length > 0 indicates it has consumed all data from the src buffer and it
|
|
|
|
// still needs to read more other buffer.
|
|
|
|
if (length > 0) {
|
|
|
|
bufs_[src].buffer_.Clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-20 23:09:33 +00:00
|
|
|
// Completes any in-flight asynchronous prefetch (blocking on Poll if one is
// pending), releases the I/O handle, drops buffered data the read at `offset`
// has already moved past, and promotes the second buffer to curr_ when it
// contains `offset`. Called before serving/refilling so that both buffers are
// in a consistent state.
void FilePrefetchBuffer::PollAndUpdateBuffersIfNeeded(uint64_t offset) {
  if (async_read_in_progress_ && fs_ != nullptr) {
    // Wait for prefetch data to complete.
    // No mutex is needed as PrefetchAsyncCallback updates the result in second
    // buffer and FilePrefetchBuffer should wait for Poll before accessing the
    // second buffer.
    std::vector<void*> handles;
    handles.emplace_back(io_handle_);
    // Time spent blocked in Poll is charged to POLL_WAIT_MICROS.
    StopWatch sw(clock_, stats_, POLL_WAIT_MICROS);
    fs_->Poll(handles, 1).PermitUncheckedError();
  }

  // Reset and Release io_handle_ after the Poll API as request has been
  // completed.
  async_read_in_progress_ = false;
  if (io_handle_ != nullptr && del_fn_ != nullptr) {
    del_fn_(io_handle_);
    io_handle_ = nullptr;
    del_fn_ = nullptr;
  }

  // Index of second buffer.
  uint32_t second = curr_ ^ 1;

  // First clear the buffers if it contains outdated data. Outdated data can be
  // because previous sequential reads were read from the cache instead of these
  // buffer.
  {
    // A buffer whose entire range ends at or before `offset` can never serve
    // this or any later sequential read, so discard it.
    if (bufs_[curr_].buffer_.CurrentSize() > 0 &&
        offset >= bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
      bufs_[curr_].buffer_.Clear();
    }
    if (bufs_[second].buffer_.CurrentSize() > 0 &&
        offset >= bufs_[second].offset_ + bufs_[second].buffer_.CurrentSize()) {
      bufs_[second].buffer_.Clear();
    }
  }

  // If data is in second buffer, make it curr_. Second buffer can be either
  // partial filled or full.
  if (bufs_[second].buffer_.CurrentSize() > 0 &&
      offset >= bufs_[second].offset_ &&
      offset < bufs_[second].offset_ + bufs_[second].buffer_.CurrentSize()) {
    // Clear the curr_ as buffers have been swapped and curr_ contains the
    // outdated data and switch the buffers.
    bufs_[curr_].buffer_.Clear();
    curr_ = curr_ ^ 1;
  }
}
|
|
|
|
|
|
|
|
// If async_read = true:
// async_read is enabled in case of sequential reads. So when
// buffers are switched, we clear the curr_ buffer as we assume the data has
// been consumed because of sequential reads.
//
// Scenarios for prefetching asynchronously:
// Case1: If both buffers are empty, prefetch n bytes
//        synchronously in curr_
//        and prefetch readahead_size_/2 async in second buffer.
// Case2: If second buffer has partial or full data, make it current and
//        prefetch readahead_size_/2 async in second buffer. In case of
//        partial data, prefetch remaining bytes from size n synchronously to
//        fulfill the requested bytes request.
// Case3: If curr_ has partial data, prefetch remaining bytes from size n
//        synchronously in curr_ to fulfill the requested bytes request and
//        prefetch readahead_size_/2 bytes async in second buffer.
// Case4: If data is in both buffers, copy requested data from curr_ and second
//        buffer to third buffer. If all requested bytes have been copied, do
//        the asynchronous prefetching in second buffer.
//
// On return, `copy_to_third_buffer` (out param) tells the caller whether the
// requested bytes were assembled in bufs_[2] rather than curr_.
Status FilePrefetchBuffer::PrefetchAsyncInternal(
    const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
    size_t length, size_t readahead_size, Env::IOPriority rate_limiter_priority,
    bool& copy_to_third_buffer) {
  if (!enable_) {
    return Status::OK();
  }

  TEST_SYNC_POINT("FilePrefetchBuffer::PrefetchAsyncInternal:Start");

  // Complete any in-flight async read and normalize the two buffers before
  // deciding what still needs to be fetched.
  PollAndUpdateBuffersIfNeeded(offset);

  // If all the requested bytes are in curr_, it will go for async prefetching
  // only.
  if (bufs_[curr_].buffer_.CurrentSize() > 0 &&
      offset + length <=
          bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
    offset += length;
    length = 0;

    // Since async request was submitted directly by calling PrefetchAsync in
    // last call, we don't need to prefetch further as this call is to poll the
    // data submitted in previous call.
    if (async_request_submitted_) {
      return Status::OK();
    }
  }

  async_request_submitted_ = false;

  Status s;
  size_t prefetch_size = length + readahead_size;
  size_t alignment = reader->file()->GetRequiredBufferAlignment();
  // Index of second buffer.
  uint32_t second = curr_ ^ 1;

  // Data is overlapping i.e. some of the data is in curr_ buffer and remaining
  // in second buffer.
  if (bufs_[curr_].buffer_.CurrentSize() > 0 &&
      bufs_[second].buffer_.CurrentSize() > 0 &&
      offset >= bufs_[curr_].offset_ &&
      offset < bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize() &&
      offset + length > bufs_[second].offset_) {
    // Allocate new buffer to third buffer;
    bufs_[2].buffer_.Clear();
    bufs_[2].buffer_.Alignment(alignment);
    bufs_[2].buffer_.AllocateNewBuffer(length);
    bufs_[2].offset_ = offset;
    copy_to_third_buffer = true;

    // Move data from curr_ buffer to third. CopyDataToBuffer advances
    // `offset` and shrinks `length` by the amount copied.
    CopyDataToBuffer(curr_, offset, length);
    if (length == 0) {
      // Requested data has been copied and curr_ still has unconsumed data.
      return s;
    }
    CopyDataToBuffer(second, offset, length);
    // Length == 0: All the requested data has been copied to third buffer. It
    // should go for only async prefetching.
    // Length > 0: More data needs to be consumed so it will continue async and
    // sync prefetching and copy the remaining data to third buffer in the end.
    // swap the buffers.
    curr_ = curr_ ^ 1;
    // Update prefetch_size as length has been updated in CopyDataToBuffer.
    prefetch_size = length + readahead_size;
  }

  size_t _offset = static_cast<size_t>(offset);
  // Recompute: the buffers may have been swapped above.
  second = curr_ ^ 1;

  // offset and size alignment for curr_ buffer with synchronous prefetching
  uint64_t rounddown_start1 = Rounddown(_offset, alignment);
  uint64_t roundup_end1 = Roundup(_offset + prefetch_size, alignment);
  uint64_t roundup_len1 = roundup_end1 - rounddown_start1;
  assert(roundup_len1 >= alignment);
  assert(roundup_len1 % alignment == 0);
  uint64_t chunk_len1 = 0;
  uint64_t read_len1 = 0;

  // For length == 0, skip the synchronous prefetching. read_len1 will be 0.
  if (length > 0) {
    CalculateOffsetAndLen(alignment, offset, roundup_len1, curr_,
                          false /*refit_tail*/, chunk_len1);
    assert(roundup_len1 >= chunk_len1);
    read_len1 = static_cast<size_t>(roundup_len1 - chunk_len1);
  }
  {
    // offset and size alignment for second buffer for asynchronous
    // prefetching
    uint64_t rounddown_start2 = roundup_end1;
    uint64_t roundup_end2 =
        Roundup(rounddown_start2 + readahead_size, alignment);

    // For length == 0, do the asynchronous prefetching in second instead of
    // synchronous prefetching in curr_.
    if (length == 0) {
      rounddown_start2 =
          bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize();
      roundup_end2 = Roundup(rounddown_start2 + prefetch_size, alignment);
    }

    uint64_t roundup_len2 = roundup_end2 - rounddown_start2;
    uint64_t chunk_len2 = 0;
    CalculateOffsetAndLen(alignment, rounddown_start2, roundup_len2, second,
                          false /*refit_tail*/, chunk_len2);

    // Update the buffer offset.
    bufs_[second].offset_ = rounddown_start2;
    assert(roundup_len2 >= chunk_len2);
    uint64_t read_len2 = static_cast<size_t>(roundup_len2 - chunk_len2);
    // Best-effort: a failed async submission is not fatal to this request,
    // so the status is intentionally dropped.
    ReadAsync(opts, reader, read_len2, chunk_len2, rounddown_start2, second)
        .PermitUncheckedError();
  }

  if (read_len1 > 0) {
    s = Read(opts, reader, rate_limiter_priority, read_len1, chunk_len1,
             rounddown_start1, curr_);
    if (!s.ok()) {
      return s;
    }
  }
  // Copy remaining requested bytes to third_buffer.
  if (copy_to_third_buffer && length > 0) {
    CopyDataToBuffer(curr_, offset, length);
  }
  return s;
}
|
|
|
|
|
2020-06-29 21:51:57 +00:00
|
|
|
// Tries to serve `n` bytes at `offset` from the curr_ buffer, synchronously
// prefetching (with readahead) first if the buffer does not already cover the
// range. Returns true and sets *result (a view into the internal buffer —
// valid only until the buffer is refilled) on success; returns false when the
// read cannot be satisfied, with *status set if the prefetch itself failed.
bool FilePrefetchBuffer::TryReadFromCache(const IOOptions& opts,
                                          RandomAccessFileReader* reader,
                                          uint64_t offset, size_t n,
                                          Slice* result, Status* status,
                                          Env::IOPriority rate_limiter_priority,
                                          bool for_compaction /* = false */) {
  // Track the smallest offset ever requested, when enabled.
  if (track_min_offset_ && offset < min_offset_read_) {
    min_offset_read_ = static_cast<size_t>(offset);
  }
  // Offsets before the buffered region cannot be served from this buffer.
  if (!enable_ || (offset < bufs_[curr_].offset_)) {
    return false;
  }

  // If the buffer contains only a few of the requested bytes:
  //    If readahead is enabled: prefetch the remaining bytes + readahead bytes
  //        and satisfy the request.
  //    If readahead is not enabled: return false.
  TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
                           &readahead_size_);
  if (offset + n > bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
    if (readahead_size_ > 0) {
      Status s;
      assert(reader != nullptr);
      assert(max_readahead_size_ >= readahead_size_);
      if (for_compaction) {
        // Compaction reads at least readahead_size_ regardless of n.
        s = Prefetch(opts, reader, offset, std::max(n, readahead_size_),
                     rate_limiter_priority);
      } else {
        if (implicit_auto_readahead_) {
          if (!IsEligibleForPrefetch(offset, n)) {
            // Ignore status as Prefetch is not called.
            s.PermitUncheckedError();
            return false;
          }
        }
        s = Prefetch(opts, reader, offset, n + readahead_size_,
                     rate_limiter_priority);
      }
      if (!s.ok()) {
        if (status) {
          *status = s;
        }
#ifndef NDEBUG
        IGNORE_STATUS_IF_ERROR(s);
#endif
        return false;
      }
      // Exponentially grow readahead after each successful prefetch, capped
      // at max_readahead_size_.
      readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
    } else {
      return false;
    }
  }
  UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);

  // Serve the request as a view into the buffered data.
  uint64_t offset_in_buffer = offset - bufs_[curr_].offset_;
  *result = Slice(bufs_[curr_].buffer_.BufferStart() + offset_in_buffer, n);
  return true;
}
|
|
|
|
|
|
|
|
// Async-aware variant of TryReadFromCache: tries to serve `n` bytes at
// `offset` from the buffers, using PrefetchAsyncInternal to fill any gap —
// which reads n + readahead_size_/2 synchronously and schedules the other
// readahead_size_/2 asynchronously. When the requested range straddles both
// buffers, the bytes are assembled in the third buffer (bufs_[2]) and served
// from there. Returns true and sets *result (a view into an internal buffer)
// on success; false otherwise, with *status set if prefetching failed.
bool FilePrefetchBuffer::TryReadFromCacheAsync(
    const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
    size_t n, Slice* result, Status* status,
    Env::IOPriority rate_limiter_priority) {
  // Track the smallest offset ever requested, when enabled.
  if (track_min_offset_ && offset < min_offset_read_) {
    min_offset_read_ = static_cast<size_t>(offset);
  }

  if (!enable_) {
    return false;
  }

  // In case of async_io_, offset can be less than bufs_[curr_].offset_ because
  // of reads not sequential and PrefetchAsync can be called for any block and
  // RocksDB will call TryReadFromCacheAsync after PrefetchAsync to Poll for
  // requested bytes.
  if (bufs_[curr_].buffer_.CurrentSize() > 0 && offset < bufs_[curr_].offset_ &&
      prev_len_ != 0) {
    return false;
  }

  bool prefetched = false;
  bool copy_to_third_buffer = false;
  // If the buffer contains only a few of the requested bytes:
  //    If readahead is enabled: prefetch the remaining bytes + readahead bytes
  //        and satisfy the request.
  //    If readahead is not enabled: return false.
  TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
                           &readahead_size_);
  if (offset < bufs_[curr_].offset_ ||
      offset + n > bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
    if (readahead_size_ > 0) {
      Status s;
      assert(reader != nullptr);
      assert(max_readahead_size_ >= readahead_size_);

      if (implicit_auto_readahead_) {
        if (!IsEligibleForPrefetch(offset, n)) {
          // Ignore status as Prefetch is not called.
          s.PermitUncheckedError();
          return false;
        }
      }

      // Prefetch n + readahead_size_/2 synchronously as remaining
      // readahead_size_/2 will be prefetched asynchronously.
      s = PrefetchAsyncInternal(opts, reader, offset, n, readahead_size_ / 2,
                                rate_limiter_priority, copy_to_third_buffer);
      if (!s.ok()) {
        if (status) {
          *status = s;
        }
#ifndef NDEBUG
        IGNORE_STATUS_IF_ERROR(s);
#endif
        return false;
      }
      // If this call only polled a previously submitted async request, no new
      // prefetch happened, so readahead_size_ should not grow below.
      prefetched = async_request_submitted_ ? false : true;
    } else {
      return false;
    }
  }
  UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);

  // Serve from the third buffer when the range was assembled there, otherwise
  // from curr_.
  uint32_t index = curr_;
  if (copy_to_third_buffer) {
    index = 2;
  }
  uint64_t offset_in_buffer = offset - bufs_[index].offset_;
  *result = Slice(bufs_[index].buffer_.BufferStart() + offset_in_buffer, n);
  if (prefetched) {
    // Exponentially grow readahead after a successful prefetch, capped at
    // max_readahead_size_.
    readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
  }
  async_request_submitted_ = false;
  return true;
}
|
2022-03-21 14:12:43 +00:00
|
|
|
|
|
|
|
void FilePrefetchBuffer::PrefetchAsyncCallback(const FSReadRequest& req,
|
|
|
|
void* /*cb_arg*/) {
|
|
|
|
uint32_t index = curr_ ^ 1;
|
Fix stress test failure in ReadAsync. (#9824)
Summary:
Fix stress test failure in ReadAsync by ignoring errors
injected during async read by FaultInjectionFS.
Failure:
```
WARNING: prefix_size is non-zero but memtablerep != prefix_hash
Didn't get expected error from MultiGet.
num_keys 14 Expected 1 errors, seen 0
Callstack that injected the fault
Injected error type = 32538
Message: error;
#0 ./db_stress() [0x6f7dd4] rocksdb::port::SaveStack(int*, int) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/port/stack_trace.cc:152
https://github.com/facebook/rocksdb/issues/1 ./db_stress() [0x7f2bda] rocksdb::FaultInjectionTestFS::InjectThreadSpecificReadError(rocksdb::FaultInjectionTestFS::ErrorOperation, rocksdb::Slice*, bool, char*, bool, bool*) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/utilities/fault_injection_fs.cc:891
https://github.com/facebook/rocksdb/issues/2 ./db_stress() [0x7f2e78] rocksdb::TestFSRandomAccessFile::Read(unsigned long, unsigned long, rocksdb::IOOptions const&, rocksdb::Slice*, char*, rocksdb::IODebugContext*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/utilities/fault_injection_fs.cc:367
https://github.com/facebook/rocksdb/issues/3 ./db_stress() [0x6483d7] rocksdb::(anonymous namespace)::CompositeRandomAccessFileWrapper::Read(unsigned long, unsigned long, rocksdb::Slice*, char*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/env/composite_env.cc:61
https://github.com/facebook/rocksdb/issues/4 ./db_stress() [0x654564] rocksdb::(anonymous namespace)::LegacyRandomAccessFileWrapper::Read(unsigned long, unsigned long, rocksdb::IOOptions const&, rocksdb::Slice*, char*, rocksdb::IODebugContext*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/env/env.cc:152
https://github.com/facebook/rocksdb/issues/5 ./db_stress() [0x659b3b] rocksdb::FSRandomAccessFile::ReadAsync(rocksdb::FSReadRequest&, rocksdb::IOOptions const&, std::function<void (rocksdb::FSReadRequest const&, void*)>, void*, void**, std::function<void (void*)>*, rocksdb::IODebugContext*) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/./include/rocksdb/file_system.h:896
https://github.com/facebook/rocksdb/issues/6 ./db_stress() [0x8b8bab] rocksdb::RandomAccessFileReader::ReadAsync(rocksdb::FSReadRequest&, rocksdb::IOOptions const&, std::function<void (rocksdb::FSReadRequest const&, void*)>, void*, void**, std::function<void (void*)>*, rocksdb::Env::IOPriority) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/random_access_file_reader.cc:459
https://github.com/facebook/rocksdb/issues/7 ./db_stress() [0x8b501f] rocksdb::FilePrefetchBuffer::ReadAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, rocksdb::Env::IOPriority, unsigned long, unsigned long, unsigned long, unsigned int) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:124
https://github.com/facebook/rocksdb/issues/8 ./db_stress() [0x8b55fc] rocksdb::FilePrefetchBuffer::PrefetchAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, unsigned long, unsigned long, unsigned long, rocksdb::Env::IOPriority, bool&) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:363
https://github.com/facebook/rocksdb/issues/9 ./db_stress() [0x8b61f8] rocksdb::FilePrefetchBuffer::TryReadFromCacheAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, unsigned long, unsigned long, rocksdb::Slice*, rocksdb::Status*, rocksdb::Env::IOPriority, bool) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:482
https://github.com/facebook/rocksdb/issues/10 ./db_stress() [0x745e04] rocksdb::BlockFetcher::TryGetFromPrefetchBuffer() /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/table/block_fetcher.cc:76
```
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9824
Test Plan:
```
./db_stress --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --allow_concurrent_memtable_write=0 --async_io=1 --atomic_flush=1 --avoid_flush_during_recovery=0 --avoid_unnecessary_blocking_io=0 -- backup_max_size=104857600 --backup_one_in=100000 --batch_protection_bytes_per_key=0 --block_size=16384 --bloom_bits=5.037629726741734 --bottommost_compression_type=lz4hc --cache_index_and_filter_blocks=0 --cache_size=8388608 --checkpoint_one_in=1000000 --checksum_type=kxxHash --clear_column_family_one_in=0 --column_families=1 --compact_files_one_in=1000000 --compact_range_one_in=1000000 --compaction_ttl=100 --compression_max_dict_buffer_bytes=1073741823 --compression_max_dict_bytes=16384 --compression_parallel_threads=1 --compression_type=zstd --compression_zstd_max_train_bytes=0 --continuous_verification_interval=0 --db=/home/akankshamahajan/dev/shm/rocksdb/rocksdb_crashtest_blackbox --db_write_buffer_size=8388608 --delpercent=0 --delrangepercent=0 --destroy_db_initially=0 - detect_filter_construct_corruption=1 --disable_wal=1 --enable_compaction_filter=0 --enable_pipelined_write=0 --expected_values_dir=/home/akankshamahajan/dev/shm/rocksdb/rocksdb_crashtest_expected --experimental_mempurge_threshold=8.772789063014715 --fail_if_options_file_error=0 --file_checksum_impl=crc32c --flush_one_in=1000000 --format_version=3 --get_current_wal_file_one_in=0 --get_live_files_one_in=1000000 --get_property_one_in=1000000 --get_sorted_wal_files_one_in=0 --index_block_restart_interval=15 --index_type=3 --iterpercent=0 --key_len_percent_dist=1,30,69 --level_compaction_dynamic_level_bytes=False --long_running_snapshots=0 --mark_for_compaction_one_file_in=0 --max_background_compactions=1 --max_bytes_for_level_base=67108864 --max_key=25000000 --max_key_len=3 --max_manifest_file_size=1073741824 --max_write_batch_group_size_bytes=16777216 --max_write_buffer_number=3 --max_write_buffer_size_to_maintain=2097152 --memtable_prefix_bloom_size_ratio=0.001 --memtable_whole_key_filtering=1 
--memtablerep=skip_list --mmap_read=0 --mock_direct_io=True --nooverwritepercent=1 --open_files=-1 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --ops_per_thread=100000000 --optimize_filters_for_memory=0 --paranoid_file_checks=1 --partition_filters=0 --partition_pinning=2 --pause_background_one_in=1000000 --periodic_compaction_seconds=1000 --prefix_size=-1 --prefixpercent=0 --prepopulate_block_cache=0 --progress_reports=0 --read_fault_one_in=32 --readpercent=100 --recycle_log_file_num=1 --reopen=0 --reserve_table_reader_memory=1 --ribbon_starting_level=999 --secondary_cache_fault_one_in=0 --set_options_one_in=0 --snapshot_hold_ops=100000 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --subcompactions=2 --sync=0 --sync_fault_injection=False --target_file_size_base=16777216 --target_file_size_multiplier=1 --test_batches_snapshots=0 --top_level_index_pinning=3 --unpartitioned_pinning=2 --use_block_based_filter=0 --use_clock_cache=0 --use_direct_io_for_flush_and_compaction=1 --use_direct_reads=0 --use_full_merge_v1=0 --use_merge=1 --use_multiget=1 --user_timestamp_size=0 --value_size_mult=32 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_db_one_in=100000 --wal_compression=none --write_buffer_size=33554432 --write_dbid_to_manifest=1 --write_fault_one_in=0 --writepercent=0
```
Reviewed By: anand1976
Differential Revision: D35514566
Pulled By: akankshamahajan15
fbshipit-source-id: e2a868fdd7422604774c1419738f9926a21e92a4
2022-04-11 17:56:11 +00:00
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
if (req.result.size() < req.len) {
|
|
|
|
// Fake an IO error to force db_stress fault injection to ignore
|
|
|
|
// truncated read errors
|
|
|
|
IGNORE_STATUS_IF_ERROR(Status::IOError());
|
|
|
|
}
|
|
|
|
IGNORE_STATUS_IF_ERROR(req.status);
|
|
|
|
#endif
|
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
if (req.status.ok()) {
|
|
|
|
if (req.offset + req.result.size() <=
|
|
|
|
bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize()) {
|
2022-07-06 18:42:59 +00:00
|
|
|
// All requested bytes are already in the buffer or no data is read
|
|
|
|
// because of EOF. So no need to update.
|
2022-03-21 14:12:43 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (req.offset < bufs_[index].offset_) {
|
|
|
|
// Next block to be read has changed (Recent read was not a sequential
|
|
|
|
// read). So ignore this read.
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
size_t current_size = bufs_[index].buffer_.CurrentSize();
|
|
|
|
bufs_[index].buffer_.Size(current_size + req.result.size());
|
|
|
|
}
|
|
|
|
}
|
2022-05-20 23:09:33 +00:00
|
|
|
|
|
|
|
// Attempts to satisfy a read of [offset, offset + n) without blocking:
// - If the bytes are already in the curr_ buffer, fills *result with a
//   slice into that buffer and returns OK.
// - Otherwise schedules an asynchronous read of the missing bytes into the
//   second buffer and returns Status::TryAgain() so the caller can come
//   back later and poll for the data.
// Returns NotSupported() when prefetching is disabled (enable_ == false).
Status FilePrefetchBuffer::PrefetchAsync(const IOOptions& opts,
                                         RandomAccessFileReader* reader,
                                         uint64_t offset, size_t n,
                                         Slice* result) {
  assert(reader != nullptr);
  if (!enable_) {
    return Status::NotSupported();
  }
  TEST_SYNC_POINT("FilePrefetchBuffer::PrefetchAsync:Start");

  // NOTE(review): presumably completes/absorbs any in-flight async read
  // relevant to this offset — exact polling semantics live in
  // PollAndUpdateBuffersIfNeeded, not visible here; confirm before relying
  // on buffer contents below.
  PollAndUpdateBuffersIfNeeded(offset);

  // Index of second buffer.
  uint32_t second = curr_ ^ 1;

  // Since PrefetchAsync can be called on non sequential reads, offset can
  // be less than the buffer's offset. In that case clear the buffer and
  // prefetch that block from scratch.
  if (bufs_[curr_].buffer_.CurrentSize() > 0 && offset < bufs_[curr_].offset_) {
    bufs_[curr_].buffer_.Clear();
  }

  // All requested bytes are already in the curr_ buffer. So no need to Read
  // again.
  if (bufs_[curr_].buffer_.CurrentSize() > 0 &&
      offset + n <= bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
    uint64_t offset_in_buffer = offset - bufs_[curr_].offset_;
    *result = Slice(bufs_[curr_].buffer_.BufferStart() + offset_in_buffer, n);
    return Status::OK();
  }

  Status s;
  size_t alignment = reader->file()->GetRequiredBufferAlignment();

  // TODO akanksha: Handle the scenario if data is overlapping in 2 buffers.
  // Currently, it covers 2 scenarios. Either one buffer (curr_) has no data or
  // it has partial data. It ignores the contents in second buffer (overlapping
  // data in 2 buffers) and sends the request to re-read that data again.

  // Clear the second buffer in order to do asynchronous prefetching.
  bufs_[second].buffer_.Clear();

  size_t offset_to_read = static_cast<size_t>(offset);
  uint64_t rounddown_start = 0;
  uint64_t roundup_end = 0;

  if (bufs_[curr_].buffer_.CurrentSize() == 0) {
    // Prefetch full data: nothing usable is buffered, so read the whole
    // aligned range covering [offset, offset + n).
    rounddown_start = Rounddown(offset_to_read, alignment);
    roundup_end = Roundup(offset_to_read + n, alignment);
  } else {
    // Prefetch remaining data: curr_ holds a prefix of the request, so only
    // the bytes past the end of curr_ need to be fetched.
    size_t rem_length = n - (bufs_[curr_].buffer_.CurrentSize() -
                             (offset - bufs_[curr_].offset_));
    rounddown_start = bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize();
    roundup_end = Roundup(rounddown_start + rem_length, alignment);
  }

  uint64_t roundup_len = roundup_end - rounddown_start;
  assert(roundup_len >= alignment);
  assert(roundup_len % alignment == 0);

  // chunk_len: bytes already in the second buffer that can be reused;
  // CalculateOffsetAndLen may memmove that reusable chunk to the front.
  uint64_t chunk_len = 0;
  CalculateOffsetAndLen(alignment, rounddown_start, roundup_len, second, false,
                        chunk_len);

  // Update the buffer offset.
  bufs_[second].offset_ = rounddown_start;
  assert(roundup_len >= chunk_len);

  // Only the bytes not already buffered need to be read from the file.
  size_t read_len = static_cast<size_t>(roundup_len - chunk_len);

  s = ReadAsync(opts, reader, read_len, chunk_len, rounddown_start, second);

  if (!s.ok()) {
    return s;
  }

  // Update read pattern so that TryReadFromCacheAsync call be called to Poll
  // the data. It will return without polling if blocks are not sequential.
  UpdateReadPattern(offset, n, /*decrease_readaheadsize=*/false);
  prev_len_ = 0;
  // Marks an async request in flight; reset by the read path once the
  // prefetched data is consumed.
  async_request_submitted_ = true;
  return Status::TryAgain();
}
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|