2019-09-16 17:31:27 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "file/file_prefetch_buffer.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
2022-09-13 00:42:01 +00:00
|
|
|
#include <cassert>
|
2019-09-16 17:31:27 +00:00
|
|
|
|
|
|
|
#include "file/random_access_file_reader.h"
|
|
|
|
#include "monitoring/histogram.h"
|
|
|
|
#include "monitoring/iostats_context_imp.h"
|
|
|
|
#include "port/port.h"
|
|
|
|
#include "test_util/sync_point.h"
|
|
|
|
#include "util/random.h"
|
2023-05-17 18:27:09 +00:00
|
|
|
#include "util/rate_limiter_impl.h"
|
2019-09-16 17:31:27 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2022-03-21 14:12:43 +00:00
|
|
|
|
|
|
|
// Prepare bufs_[index] for a read of roundup_len bytes starting at the
// alignment-rounded-down position of `offset`, reusing any overlapping bytes
// already buffered.
//
// On return, chunk_len (out) is the number of reusable bytes (aligned; 0 if
// none), and the buffer has capacity >= roundup_len with the reusable chunk
// positioned at the front — moved in place via RefitTail when refit_tail is
// true, or copied into a newly allocated buffer otherwise.
void FilePrefetchBuffer::CalculateOffsetAndLen(size_t alignment,
                                               uint64_t offset,
                                               size_t roundup_len,
                                               uint32_t index, bool refit_tail,
                                               uint64_t& chunk_len) {
  uint64_t chunk_offset_in_buffer = 0;
  bool copy_data_to_new_buffer = false;
  // Check if requested bytes are in the existing buffer_.
  // If only a few bytes exist -- reuse them & read only what is really needed.
  // This is typically the case of incremental reading of data.
  // If no bytes exist in buffer -- full pread.
  if (DoesBufferContainData(index) && IsOffsetInBuffer(offset, index)) {
    // Only a few requested bytes are in the buffer. memmove those chunk of
    // bytes to the beginning, and memcpy them back into the new buffer if a
    // new buffer is created.
    // Round the intra-buffer offset down so the reused chunk stays aligned.
    chunk_offset_in_buffer = Rounddown(
        static_cast<size_t>(offset - bufs_[index].offset_), alignment);
    chunk_len = static_cast<uint64_t>(bufs_[index].buffer_.CurrentSize()) -
                chunk_offset_in_buffer;
    assert(chunk_offset_in_buffer % alignment == 0);
    assert(chunk_len % alignment == 0);
    assert(chunk_offset_in_buffer + chunk_len <=
           bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize());
    if (chunk_len > 0) {
      copy_data_to_new_buffer = true;
    } else {
      // this reset is not necessary, but just to be safe.
      chunk_offset_in_buffer = 0;
    }
  }

  // Create a new buffer only if current capacity is not sufficient, and memcopy
  // bytes from old buffer if needed (i.e., if chunk_len is greater than 0).
  if (bufs_[index].buffer_.Capacity() < roundup_len) {
    bufs_[index].buffer_.Alignment(alignment);
    bufs_[index].buffer_.AllocateNewBuffer(
        static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
        chunk_offset_in_buffer, static_cast<size_t>(chunk_len));
  } else if (chunk_len > 0 && refit_tail) {
    // New buffer not needed. But memmove bytes from tail to the beginning since
    // chunk_len is greater than 0.
    bufs_[index].buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer),
                                   static_cast<size_t>(chunk_len));
  } else if (chunk_len > 0) {
    // For async prefetching, it doesn't call RefitTail with chunk_len > 0.
    // Allocate new buffer if needed because aligned buffer calculate remaining
    // buffer as capacity_ - cursize_ which might not be the case in this as we
    // are not refitting.
    // TODO akanksha: Update the condition when asynchronous prefetching is
    // stable.
    bufs_[index].buffer_.Alignment(alignment);
    bufs_[index].buffer_.AllocateNewBuffer(
        static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
        chunk_offset_in_buffer, static_cast<size_t>(chunk_len));
  }
}
|
|
|
|
|
|
|
|
Status FilePrefetchBuffer::Read(const IOOptions& opts,
|
|
|
|
RandomAccessFileReader* reader,
|
|
|
|
uint64_t read_len, uint64_t chunk_len,
|
|
|
|
uint64_t rounddown_start, uint32_t index) {
|
|
|
|
Slice result;
|
|
|
|
Status s = reader->Read(opts, rounddown_start + chunk_len, read_len, &result,
|
|
|
|
bufs_[index].buffer_.BufferStart() + chunk_len,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
/*aligned_buf=*/nullptr);
|
2022-03-21 14:12:43 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
if (result.size() < read_len) {
|
|
|
|
// Fake an IO error to force db_stress fault injection to ignore
|
|
|
|
// truncated read errors
|
|
|
|
IGNORE_STATUS_IF_ERROR(Status::IOError());
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the buffer offset and size.
|
|
|
|
bufs_[index].offset_ = rounddown_start;
|
|
|
|
bufs_[index].buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Submit an asynchronous read of read_len bytes at rounddown_start into
// bufs_[index]. On successful submission, marks the buffer as having an async
// read in progress; completion is delivered via PrefetchAsyncCallback.
Status FilePrefetchBuffer::ReadAsync(const IOOptions& opts,
                                     RandomAccessFileReader* reader,
                                     uint64_t read_len,
                                     uint64_t rounddown_start, uint32_t index) {
  TEST_SYNC_POINT("FilePrefetchBuffer::ReadAsync");
  // Completion callback invoked when the async read request finishes.
  auto callback = std::bind(&FilePrefetchBuffer::PrefetchAsyncCallback, this,
                            std::placeholders::_1, std::placeholders::_2);
  Slice async_result;
  FSReadRequest read_req;
  read_req.len = read_len;
  read_req.offset = rounddown_start;
  read_req.result = async_result;
  read_req.scratch = bufs_[index].buffer_.BufferStart();
  bufs_[index].async_req_len_ = read_req.len;

  Status s =
      reader->ReadAsync(read_req, opts, callback, &(bufs_[index].pos_),
                        &(bufs_[index].io_handle_), &(bufs_[index].del_fn_),
                        /*aligned_buf=*/nullptr);
  // Per-request status is reported through the callback path; mark this copy
  // as checked so it does not trip status-checking assertions.
  read_req.status.PermitUncheckedError();
  if (s.ok()) {
    bufs_[index].async_read_in_progress_ = true;
  }
  return s;
}
|
|
|
|
|
2020-06-29 21:51:57 +00:00
|
|
|
// Synchronously prefetch n bytes starting at `offset` into the curr_ buffer,
// reusing any already-buffered overlap. Returns OK without reading when
// prefetching is disabled, reader is null, or the requested range is already
// fully buffered.
Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,
                                    RandomAccessFileReader* reader,
                                    uint64_t offset, size_t n) {
  if (!enable_ || reader == nullptr) {
    return Status::OK();
  }
  TEST_SYNC_POINT("FilePrefetchBuffer::Prefetch:Start");

  if (offset + n <= bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
    // All requested bytes are already in the curr_ buffer. So no need to Read
    // again.
    return Status::OK();
  }

  // Round the requested range out to the file's required alignment.
  size_t alignment = reader->file()->GetRequiredBufferAlignment();
  size_t offset_ = static_cast<size_t>(offset);
  uint64_t rounddown_offset = Rounddown(offset_, alignment);
  uint64_t roundup_end = Roundup(offset_ + n, alignment);
  uint64_t roundup_len = roundup_end - rounddown_offset;
  assert(roundup_len >= alignment);
  assert(roundup_len % alignment == 0);

  // chunk_len is set to the number of bytes reusable from the current buffer;
  // only the remainder needs to be read from the file.
  uint64_t chunk_len = 0;
  CalculateOffsetAndLen(alignment, offset, roundup_len, curr_,
                        true /*refit_tail*/, chunk_len);
  size_t read_len = static_cast<size_t>(roundup_len - chunk_len);

  Status s = Read(opts, reader, read_len, chunk_len, rounddown_offset, curr_);

  // Stats for the table-open tail-prefetch use case only.
  if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && s.ok()) {
    RecordInHistogram(stats_, TABLE_OPEN_PREFETCH_TAIL_READ_BYTES, read_len);
  }
  return s;
}
|
|
|
|
|
|
|
|
// Copy data from src to third buffer.
|
|
|
|
void FilePrefetchBuffer::CopyDataToBuffer(uint32_t src, uint64_t& offset,
|
|
|
|
size_t& length) {
|
|
|
|
if (length == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
uint64_t copy_offset = (offset - bufs_[src].offset_);
|
|
|
|
size_t copy_len = 0;
|
2022-09-13 00:42:01 +00:00
|
|
|
if (IsDataBlockInBuffer(offset, length, src)) {
|
2022-03-21 14:12:43 +00:00
|
|
|
// All the bytes are in src.
|
|
|
|
copy_len = length;
|
|
|
|
} else {
|
|
|
|
copy_len = bufs_[src].buffer_.CurrentSize() - copy_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(bufs_[2].buffer_.BufferStart() + bufs_[2].buffer_.CurrentSize(),
|
|
|
|
bufs_[src].buffer_.BufferStart() + copy_offset, copy_len);
|
|
|
|
|
|
|
|
bufs_[2].buffer_.Size(bufs_[2].buffer_.CurrentSize() + copy_len);
|
|
|
|
|
|
|
|
// Update offset and length.
|
|
|
|
offset += copy_len;
|
|
|
|
length -= copy_len;
|
|
|
|
|
|
|
|
// length > 0 indicates it has consumed all data from the src buffer and it
|
|
|
|
// still needs to read more other buffer.
|
|
|
|
if (length > 0) {
|
|
|
|
bufs_[src].buffer_.Clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-13 00:42:01 +00:00
|
|
|
// Abort in-flight async IOs whose buffers hold outdated data. Data can become
// outdated when previous sequential reads were served from the cache instead
// of these buffers; such IOs are aborted and their handles released. Buffers
// left with no io_handle are marked as having no async read in progress.
void FilePrefetchBuffer::AbortIOIfNeeded(uint64_t offset) {
  uint32_t second = curr_ ^ 1;
  std::vector<void*> handles;
  autovector<uint32_t> buf_pos;
  // Collect handles of async reads whose target ranges no longer cover the
  // requested offset.
  if (IsBufferOutdatedWithAsyncProgress(offset, curr_)) {
    handles.emplace_back(bufs_[curr_].io_handle_);
    buf_pos.emplace_back(curr_);
  }
  if (IsBufferOutdatedWithAsyncProgress(offset, second)) {
    handles.emplace_back(bufs_[second].io_handle_);
    buf_pos.emplace_back(second);
  }
  if (!handles.empty()) {
    StopWatch sw(clock_, stats_, ASYNC_PREFETCH_ABORT_MICROS);
    Status s = fs_->AbortIO(handles);
    assert(s.ok());
  }

  for (auto& pos : buf_pos) {
    // Release io_handle.
    DestroyAndClearIOHandle(pos);
  }

  // A null io_handle means no async read can still be in flight for that
  // buffer, whether it was just aborted above or never submitted.
  if (bufs_[second].io_handle_ == nullptr) {
    bufs_[second].async_read_in_progress_ = false;
  }

  if (bufs_[curr_].io_handle_ == nullptr) {
    bufs_[curr_].async_read_in_progress_ = false;
  }
}
|
|
|
|
|
|
|
|
void FilePrefetchBuffer::AbortAllIOs() {
|
|
|
|
uint32_t second = curr_ ^ 1;
|
|
|
|
std::vector<void*> handles;
|
|
|
|
for (uint32_t i = 0; i < 2; i++) {
|
|
|
|
if (bufs_[i].async_read_in_progress_ && bufs_[i].io_handle_ != nullptr) {
|
|
|
|
handles.emplace_back(bufs_[i].io_handle_);
|
2022-03-26 01:26:22 +00:00
|
|
|
}
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
if (!handles.empty()) {
|
|
|
|
StopWatch sw(clock_, stats_, ASYNC_PREFETCH_ABORT_MICROS);
|
|
|
|
Status s = fs_->AbortIO(handles);
|
|
|
|
assert(s.ok());
|
|
|
|
}
|
2022-03-26 01:26:22 +00:00
|
|
|
|
2022-09-13 00:42:01 +00:00
|
|
|
// Release io_handles.
|
|
|
|
if (bufs_[curr_].io_handle_ != nullptr && bufs_[curr_].del_fn_ != nullptr) {
|
|
|
|
DestroyAndClearIOHandle(curr_);
|
2022-12-21 17:15:53 +00:00
|
|
|
} else {
|
|
|
|
bufs_[curr_].async_read_in_progress_ = false;
|
2022-09-13 00:42:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (bufs_[second].io_handle_ != nullptr && bufs_[second].del_fn_ != nullptr) {
|
|
|
|
DestroyAndClearIOHandle(second);
|
2022-12-21 17:15:53 +00:00
|
|
|
} else {
|
|
|
|
bufs_[second].async_read_in_progress_ = false;
|
2022-09-13 00:42:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Clear the buffers if it contains outdated data. Outdated data can be
// because previous sequential reads were read from the cache instead of these
// buffer. Also swaps curr_ and second when the requested offset actually
// lives in the second buffer.
void FilePrefetchBuffer::UpdateBuffersIfNeeded(uint64_t offset) {
  uint32_t second = curr_ ^ 1;
  if (IsBufferOutdated(offset, curr_)) {
    bufs_[curr_].buffer_.Clear();
  }
  if (IsBufferOutdated(offset, second)) {
    bufs_[second].buffer_.Clear();
  }

  {
    // In case buffers do not align, reset second buffer. This can happen in
    // case readahead_size is set.
    // Only safe to inspect/clear when neither buffer has an async read in
    // flight.
    if (!bufs_[second].async_read_in_progress_ &&
        !bufs_[curr_].async_read_in_progress_) {
      if (DoesBufferContainData(curr_)) {
        // second must start exactly where curr_'s data ends; otherwise its
        // contents are not contiguous with curr_ and are dropped.
        if (bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize() !=
            bufs_[second].offset_) {
          bufs_[second].buffer_.Clear();
        }
      } else {
        // curr_ is empty: second is only useful if it covers the offset.
        if (!IsOffsetInBuffer(offset, second)) {
          bufs_[second].buffer_.Clear();
        }
      }
    }
  }

  // If data starts from second buffer, make it curr_. Second buffer can be
  // either partial filled, full or async read is in progress.
  if (bufs_[second].async_read_in_progress_) {
    if (IsOffsetInBufferWithAsyncProgress(offset, second)) {
      curr_ = curr_ ^ 1;
    }
  } else {
    if (DoesBufferContainData(second) && IsOffsetInBuffer(offset, second)) {
      assert(bufs_[curr_].async_read_in_progress_ ||
             bufs_[curr_].buffer_.CurrentSize() == 0);
      curr_ = curr_ ^ 1;
    }
  }
}
|
|
|
|
|
2022-09-13 00:42:01 +00:00
|
|
|
// Wait for any outstanding async read on curr_ to finish, release its
// io_handle, then refresh/swap the buffers for the requested offset.
void FilePrefetchBuffer::PollAndUpdateBuffersIfNeeded(uint64_t offset) {
  if (bufs_[curr_].async_read_in_progress_ && fs_ != nullptr) {
    if (bufs_[curr_].io_handle_ != nullptr) {
      // Block until the outstanding prefetch completes. No mutex is needed:
      // async_read_in_progress_ acts as the guard and is updated by the main
      // thread only.
      std::vector<void*> wait_handles{bufs_[curr_].io_handle_};
      StopWatch sw(clock_, stats_, POLL_WAIT_MICROS);
      fs_->Poll(wait_handles, 1).PermitUncheckedError();
    }

    // Reset and release the io_handle now that the request has completed.
    DestroyAndClearIOHandle(curr_);
  }
  UpdateBuffersIfNeeded(offset);
}
|
|
|
|
|
2022-11-01 23:06:51 +00:00
|
|
|
// Handle a read whose range straddles the curr_ and second buffers. When that
// happens, the bytes available in curr_ are staged into the third buffer
// (bufs_[2]) via CopyDataToBuffer, copy_to_third_buffer is set, the
// tmp_offset/tmp_length cursor is advanced, and — if the remainder fits in
// the second buffer — a new async prefetch is issued on the now-consumed
// curr_ before the buffers are swapped.
Status FilePrefetchBuffer::HandleOverlappingData(
    const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
    size_t length, size_t readahead_size, bool& copy_to_third_buffer,
    uint64_t& tmp_offset, size_t& tmp_length) {
  Status s;
  size_t alignment = reader->file()->GetRequiredBufferAlignment();
  uint32_t second;

  // Check if the first buffer has the required offset and the async read is
  // still in progress. This should only happen if a prefetch was initiated
  // by Seek, but the next access is at another offset.
  if (bufs_[curr_].async_read_in_progress_ &&
      IsOffsetInBufferWithAsyncProgress(offset, curr_)) {
    PollAndUpdateBuffersIfNeeded(offset);
  }
  // Recompute after the poll above, which may have swapped curr_.
  second = curr_ ^ 1;

  // If data is overlapping over two buffers, copy the data from curr_ and
  // call ReadAsync on curr_.
  if (!bufs_[curr_].async_read_in_progress_ && DoesBufferContainData(curr_) &&
      IsOffsetInBuffer(offset, curr_) &&
      (/*Data extends over curr_ buffer and second buffer either has data or in
        process of population=*/
       (offset + length > bufs_[second].offset_) &&
       (bufs_[second].async_read_in_progress_ ||
        DoesBufferContainData(second)))) {
    // Allocate new buffer to third buffer;
    bufs_[2].buffer_.Clear();
    bufs_[2].buffer_.Alignment(alignment);
    bufs_[2].buffer_.AllocateNewBuffer(length);
    bufs_[2].offset_ = offset;
    copy_to_third_buffer = true;

    CopyDataToBuffer(curr_, tmp_offset, tmp_length);

    // Call async prefetching on curr_ since data has been consumed in curr_
    // only if data lies within second buffer.
    // second_size is the expected extent of second: the in-flight request
    // length if an async read is pending, else the bytes it already holds.
    size_t second_size = bufs_[second].async_read_in_progress_
                             ? bufs_[second].async_req_len_
                             : bufs_[second].buffer_.CurrentSize();
    uint64_t rounddown_start = bufs_[second].offset_ + second_size;
    // Second buffer might be out of bound if first buffer already prefetched
    // that data.
    if (tmp_offset + tmp_length <= bufs_[second].offset_ + second_size &&
        !IsOffsetOutOfBound(rounddown_start)) {
      uint64_t roundup_end =
          Roundup(rounddown_start + readahead_size, alignment);
      uint64_t roundup_len = roundup_end - rounddown_start;
      uint64_t chunk_len = 0;
      CalculateOffsetAndLen(alignment, rounddown_start, roundup_len, curr_,
                            false, chunk_len);
      // curr_ was just consumed, so nothing should be reusable in it.
      assert(chunk_len == 0);
      assert(roundup_len >= chunk_len);

      bufs_[curr_].offset_ = rounddown_start;
      uint64_t read_len = static_cast<size_t>(roundup_len - chunk_len);
      s = ReadAsync(opts, reader, read_len, rounddown_start, curr_);
      if (!s.ok()) {
        DestroyAndClearIOHandle(curr_);
        bufs_[curr_].buffer_.Clear();
        return s;
      }
    }
    // Reads continue from the second buffer, so make it the current one.
    curr_ = curr_ ^ 1;
  }
  return s;
}
|
|
|
|
// If async_io is enabled in case of sequential reads, PrefetchAsyncInternal is
// called. When buffers are switched, we clear the curr_ buffer as we assume the
// data has been consumed because of sequential reads.
// Data in buffers will always be sequential, with curr_ following second and
// not vice versa.
//
// Scenarios for prefetching asynchronously:
// Case1: If both buffers are empty, prefetch n + readahead_size_/2 bytes
//        synchronously in curr_ and prefetch readahead_size_/2 bytes async in
//        the second buffer.
// Case2: If the second buffer has partial or full data, make it current and
//        prefetch readahead_size_/2 bytes async in the second buffer. In case
//        of partial data, prefetch the remaining bytes from size n
//        synchronously to fulfill the requested bytes.
// Case3: If curr_ has partial data, prefetch the remaining bytes from size n
//        synchronously in curr_ to fulfill the requested bytes and prefetch
//        readahead_size_/2 bytes async in the second buffer.
// Case4: (Special case) If data is in both buffers, copy the requested data
//        from curr_, send an async request on curr_, wait for poll to fill the
//        second buffer (if any), and copy the remaining data from the second
//        buffer to the third buffer.
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
Status FilePrefetchBuffer::PrefetchAsyncInternal(const IOOptions& opts,
|
|
|
|
RandomAccessFileReader* reader,
|
|
|
|
uint64_t offset, size_t length,
|
|
|
|
size_t readahead_size,
|
|
|
|
bool& copy_to_third_buffer) {
|
2022-11-01 23:06:51 +00:00
|
|
|
if (!enable_) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_SYNC_POINT("FilePrefetchBuffer::PrefetchAsyncInternal:Start");
|
|
|
|
|
|
|
|
size_t alignment = reader->file()->GetRequiredBufferAlignment();
|
|
|
|
Status s;
|
|
|
|
uint64_t tmp_offset = offset;
|
|
|
|
size_t tmp_length = length;
|
|
|
|
|
|
|
|
// 1. Abort IO and swap buffers if needed to point curr_ to first buffer with
|
|
|
|
// data.
|
|
|
|
if (!explicit_prefetch_submitted_) {
|
|
|
|
AbortIOIfNeeded(offset);
|
|
|
|
}
|
|
|
|
UpdateBuffersIfNeeded(offset);
|
|
|
|
|
|
|
|
// 2. Handle overlapping data over two buffers. If data is overlapping then
|
|
|
|
// during this call:
|
|
|
|
// - data from curr_ is copied into third buffer,
|
|
|
|
// - curr_ is send for async prefetching of further data if second buffer
|
|
|
|
// contains remaining requested data or in progress for async prefetch,
|
|
|
|
// - switch buffers and curr_ now points to second buffer to copy remaining
|
|
|
|
// data.
|
|
|
|
s = HandleOverlappingData(opts, reader, offset, length, readahead_size,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
copy_to_third_buffer, tmp_offset, tmp_length);
|
2022-11-01 23:06:51 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
|
|
|
|
// 3. Call Poll only if data is needed for the second buffer.
|
2022-11-01 23:06:51 +00:00
|
|
|
// - Return if whole data is in curr_ and second buffer is in progress or
|
|
|
|
// already full.
|
2022-09-13 00:42:01 +00:00
|
|
|
// - If second buffer is empty, it will go for ReadAsync for second buffer.
|
|
|
|
if (!bufs_[curr_].async_read_in_progress_ && DoesBufferContainData(curr_) &&
|
|
|
|
IsDataBlockInBuffer(offset, length, curr_)) {
|
|
|
|
// Whole data is in curr_.
|
|
|
|
UpdateBuffersIfNeeded(offset);
|
2022-11-01 23:06:51 +00:00
|
|
|
if (!IsSecondBuffEligibleForPrefetching()) {
|
2019-09-16 17:31:27 +00:00
|
|
|
return s;
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
} else {
|
2022-11-01 23:06:51 +00:00
|
|
|
// After poll request, curr_ might be empty because of IOError in
|
|
|
|
// callback while reading or may contain required data.
|
2022-09-13 00:42:01 +00:00
|
|
|
PollAndUpdateBuffersIfNeeded(offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (copy_to_third_buffer) {
|
|
|
|
offset = tmp_offset;
|
|
|
|
length = tmp_length;
|
|
|
|
}
|
|
|
|
|
|
|
|
// 4. After polling and swapping buffers, if all the requested bytes are in
|
|
|
|
// curr_, it will only go for async prefetching.
|
|
|
|
// copy_to_third_buffer is a special case so it will be handled separately.
|
|
|
|
if (!copy_to_third_buffer && DoesBufferContainData(curr_) &&
|
|
|
|
IsDataBlockInBuffer(offset, length, curr_)) {
|
|
|
|
offset += length;
|
|
|
|
length = 0;
|
|
|
|
|
|
|
|
// Since async request was submitted directly by calling PrefetchAsync in
|
|
|
|
// last call, we don't need to prefetch further as this call is to poll
|
|
|
|
// the data submitted in previous call.
|
|
|
|
if (explicit_prefetch_submitted_) {
|
|
|
|
return s;
|
|
|
|
}
|
2022-11-01 23:06:51 +00:00
|
|
|
if (!IsSecondBuffEligibleForPrefetching()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t second = curr_ ^ 1;
|
|
|
|
assert(!bufs_[curr_].async_read_in_progress_);
|
|
|
|
|
|
|
|
// In case because of some IOError curr_ got empty, abort IO for second as
|
|
|
|
// well. Otherwise data might not align if more data needs to be read in curr_
|
|
|
|
// which might overlap with second buffer.
|
|
|
|
if (!DoesBufferContainData(curr_) && bufs_[second].async_read_in_progress_) {
|
|
|
|
if (bufs_[second].io_handle_ != nullptr) {
|
|
|
|
std::vector<void*> handles;
|
|
|
|
handles.emplace_back(bufs_[second].io_handle_);
|
|
|
|
{
|
|
|
|
StopWatch sw(clock_, stats_, ASYNC_PREFETCH_ABORT_MICROS);
|
|
|
|
Status status = fs_->AbortIO(handles);
|
|
|
|
assert(status.ok());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
DestroyAndClearIOHandle(second);
|
|
|
|
bufs_[second].buffer_.Clear();
|
2022-09-13 00:42:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// 5. Data is overlapping i.e. some of the data has been copied to third
|
2022-11-01 23:06:51 +00:00
|
|
|
// buffer and remaining will be updated below.
|
|
|
|
if (copy_to_third_buffer && DoesBufferContainData(curr_)) {
|
2022-09-13 00:42:01 +00:00
|
|
|
CopyDataToBuffer(curr_, offset, length);
|
|
|
|
|
|
|
|
// Length == 0: All the requested data has been copied to third buffer and
|
|
|
|
// it has already gone for async prefetching. It can return without doing
|
|
|
|
// anything further.
|
2022-11-01 23:06:51 +00:00
|
|
|
// Length > 0: More data needs to be consumed so it will continue async
|
|
|
|
// and sync prefetching and copy the remaining data to third buffer in the
|
|
|
|
// end.
|
2022-09-13 00:42:01 +00:00
|
|
|
if (length == 0) {
|
|
|
|
return s;
|
|
|
|
}
|
2019-09-16 17:31:27 +00:00
|
|
|
}
|
|
|
|
|
2022-09-13 00:42:01 +00:00
|
|
|
// 6. Go for ReadAsync and Read (if needed).
|
|
|
|
size_t prefetch_size = length + readahead_size;
|
2022-03-21 14:12:43 +00:00
|
|
|
size_t _offset = static_cast<size_t>(offset);
|
|
|
|
|
|
|
|
// offset and size alignment for curr_ buffer with synchronous prefetching
|
|
|
|
uint64_t rounddown_start1 = Rounddown(_offset, alignment);
|
|
|
|
uint64_t roundup_end1 = Roundup(_offset + prefetch_size, alignment);
|
|
|
|
uint64_t roundup_len1 = roundup_end1 - rounddown_start1;
|
|
|
|
assert(roundup_len1 >= alignment);
|
|
|
|
assert(roundup_len1 % alignment == 0);
|
|
|
|
uint64_t chunk_len1 = 0;
|
|
|
|
uint64_t read_len1 = 0;
|
|
|
|
|
2022-11-01 23:06:51 +00:00
|
|
|
assert(!bufs_[second].async_read_in_progress_ &&
|
|
|
|
!DoesBufferContainData(second));
|
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
// For length == 0, skip the synchronous prefetching. read_len1 will be 0.
|
|
|
|
if (length > 0) {
|
|
|
|
CalculateOffsetAndLen(alignment, offset, roundup_len1, curr_,
|
|
|
|
false /*refit_tail*/, chunk_len1);
|
2022-03-26 01:26:22 +00:00
|
|
|
assert(roundup_len1 >= chunk_len1);
|
2022-03-21 14:12:43 +00:00
|
|
|
read_len1 = static_cast<size_t>(roundup_len1 - chunk_len1);
|
2019-09-16 17:31:27 +00:00
|
|
|
}
|
2023-08-29 00:08:28 +00:00
|
|
|
|
|
|
|
// Prefetch in second buffer only if readahead_size_ > 0.
|
|
|
|
if (readahead_size_ > 0) {
|
2022-03-21 14:12:43 +00:00
|
|
|
// offset and size alignment for second buffer for asynchronous
|
|
|
|
// prefetching
|
|
|
|
uint64_t rounddown_start2 = roundup_end1;
|
|
|
|
uint64_t roundup_end2 =
|
|
|
|
Roundup(rounddown_start2 + readahead_size, alignment);
|
2019-09-16 17:31:27 +00:00
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
// For length == 0, do the asynchronous prefetching in second instead of
|
2022-03-26 01:26:22 +00:00
|
|
|
// synchronous prefetching in curr_.
|
2022-03-21 14:12:43 +00:00
|
|
|
if (length == 0) {
|
|
|
|
rounddown_start2 =
|
|
|
|
bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize();
|
|
|
|
roundup_end2 = Roundup(rounddown_start2 + prefetch_size, alignment);
|
|
|
|
}
|
|
|
|
|
2023-09-20 23:13:20 +00:00
|
|
|
// Second buffer might be out of bound if first buffer already prefetched
|
|
|
|
// that data.
|
|
|
|
if (!IsOffsetOutOfBound(rounddown_start2)) {
|
|
|
|
uint64_t roundup_len2 = roundup_end2 - rounddown_start2;
|
|
|
|
uint64_t chunk_len2 = 0;
|
|
|
|
CalculateOffsetAndLen(alignment, rounddown_start2, roundup_len2, second,
|
|
|
|
false /*refit_tail*/, chunk_len2);
|
|
|
|
assert(chunk_len2 == 0);
|
|
|
|
// Update the buffer offset.
|
|
|
|
bufs_[second].offset_ = rounddown_start2;
|
|
|
|
assert(roundup_len2 >= chunk_len2);
|
|
|
|
uint64_t read_len2 = static_cast<size_t>(roundup_len2 - chunk_len2);
|
|
|
|
s = ReadAsync(opts, reader, read_len2, rounddown_start2, second);
|
|
|
|
if (!s.ok()) {
|
|
|
|
DestroyAndClearIOHandle(second);
|
|
|
|
bufs_[second].buffer_.Clear();
|
|
|
|
return s;
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
}
|
2020-10-14 17:43:37 +00:00
|
|
|
}
|
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
if (read_len1 > 0) {
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
s = Read(opts, reader, read_len1, chunk_len1, rounddown_start1, curr_);
|
2022-03-21 14:12:43 +00:00
|
|
|
if (!s.ok()) {
|
2022-09-13 00:42:01 +00:00
|
|
|
if (bufs_[second].io_handle_ != nullptr) {
|
|
|
|
std::vector<void*> handles;
|
|
|
|
handles.emplace_back(bufs_[second].io_handle_);
|
|
|
|
{
|
|
|
|
StopWatch sw(clock_, stats_, ASYNC_PREFETCH_ABORT_MICROS);
|
|
|
|
Status status = fs_->AbortIO(handles);
|
|
|
|
assert(status.ok());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
DestroyAndClearIOHandle(second);
|
|
|
|
bufs_[second].buffer_.Clear();
|
|
|
|
bufs_[curr_].buffer_.Clear();
|
2022-03-21 14:12:43 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Copy remaining requested bytes to third_buffer.
|
|
|
|
if (copy_to_third_buffer && length > 0) {
|
|
|
|
CopyDataToBuffer(curr_, offset, length);
|
2020-04-24 20:03:08 +00:00
|
|
|
}
|
2019-09-16 17:31:27 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2020-06-29 21:51:57 +00:00
|
|
|
bool FilePrefetchBuffer::TryReadFromCache(const IOOptions& opts,
|
2021-11-20 01:52:42 +00:00
|
|
|
RandomAccessFileReader* reader,
|
2020-06-29 21:51:57 +00:00
|
|
|
uint64_t offset, size_t n,
|
2020-12-30 17:24:04 +00:00
|
|
|
Slice* result, Status* status,
|
2022-02-17 07:17:03 +00:00
|
|
|
bool for_compaction /* = false */) {
|
Add new stat rocksdb.table.open.prefetch.tail.read.bytes, rocksdb.table.open.prefetch.tail.{miss|hit} (#11265)
Summary:
**Context/Summary:**
We are adding new stats to measure behavior of prefetched tail size and look up into this buffer
The stat collection is done in FilePrefetchBuffer but only for prefetched tail buffer during table open for now using FilePrefetchBuffer enum. It's cleaner than the alternative of implementing in upper-level call places of FilePrefetchBuffer for table open. It also has the benefit of extensible to other types of FilePrefetchBuffer if needed. See db bench for perf regression concern.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11265
Test Plan:
**- Piggyback on existing test**
**- rocksdb.table.open.prefetch.tail.miss is harder to UT so I manually set prefetch tail read bytes to be small and run db bench.**
```
./db_bench -db=/tmp/testdb -statistics=true -benchmarks="fillseq" -key_size=32 -value_size=512 -num=5000 -write_buffer_size=655 -target_file_size_base=655 -disable_auto_compactions=false -compression_type=none -bloom_bits=3 -use_direct_reads=true
```
```
rocksdb.table.open.prefetch.tail.read.bytes P50 : 4096.000000 P95 : 4096.000000 P99 : 4096.000000 P100 : 4096.000000 COUNT : 225 SUM : 921600
rocksdb.table.open.prefetch.tail.miss COUNT : 91
rocksdb.table.open.prefetch.tail.hit COUNT : 1034
```
**- No perf regression observed in db_bench**
SETUP command: create same db with ~900 files for pre-change/post-change.
```
./db_bench -db=/tmp/testdb -benchmarks="fillseq" -key_size=32 -value_size=512 -num=500000 -write_buffer_size=655360 -disable_auto_compactions=true -target_file_size_base=16777216 -compression_type=none
```
TEST command 60 runs or til convergence: as suggested by anand1976 and akankshamahajan15, vary `seek_nexts` and `async_io` in testing.
```
./db_bench -use_existing_db=true -db=/tmp/testdb -statistics=false -cache_size=0 -cache_index_and_filter_blocks=false -benchmarks=seekrandom[-X60] -num=50000 -seek_nexts={10, 500, 1000} -async_io={0|1} -use_direct_reads=true
```
async io = 0, direct io read = true
| seek_nexts = 10, 30 runs | seek_nexts = 500, 12 runs | seek_nexts = 1000, 6 runs
-- | -- | -- | --
pre-post change | 4776 (± 28) ops/sec; 24.8 (± 0.1) MB/sec | 288 (± 1) ops/sec; 74.8 (± 0.4) MB/sec | 145 (± 4) ops/sec; 75.6 (± 2.2) MB/sec
post-change | 4790 (± 32) ops/sec; 24.9 (± 0.2) MB/sec | 288 (± 3) ops/sec; 74.7 (± 0.8) MB/sec | 143 (± 3) ops/sec; 74.5 (± 1.6) MB/sec
async io = 1, direct io read = true
| seek_nexts = 10, 54 runs | seek_nexts = 500, 6 runs | seek_nexts = 1000, 4 runs
-- | -- | -- | --
pre-post change | 3350 (± 36) ops/sec; 17.4 (± 0.2) MB/sec | 264 (± 0) ops/sec; 68.7 (± 0.2) MB/sec | 138 (± 1) ops/sec; 71.8 (± 1.0) MB/sec
post-change | 3358 (± 27) ops/sec; 17.4 (± 0.1) MB/sec | 263 (± 2) ops/sec; 68.3 (± 0.8) MB/sec | 139 (± 1) ops/sec; 72.6 (± 0.6) MB/sec
Reviewed By: ajkr
Differential Revision: D43781467
Pulled By: hx235
fbshipit-source-id: a706a18472a8edb2b952bac3af40eec803537f2a
2023-03-15 21:02:43 +00:00
|
|
|
bool ret = TryReadFromCacheUntracked(opts, reader, offset, n, result, status,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
for_compaction);
|
Add new stat rocksdb.table.open.prefetch.tail.read.bytes, rocksdb.table.open.prefetch.tail.{miss|hit} (#11265)
Summary:
**Context/Summary:**
We are adding new stats to measure behavior of prefetched tail size and look up into this buffer
The stat collection is done in FilePrefetchBuffer but only for prefetched tail buffer during table open for now using FilePrefetchBuffer enum. It's cleaner than the alternative of implementing in upper-level call places of FilePrefetchBuffer for table open. It also has the benefit of extensible to other types of FilePrefetchBuffer if needed. See db bench for perf regression concern.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11265
Test Plan:
**- Piggyback on existing test**
**- rocksdb.table.open.prefetch.tail.miss is harder to UT so I manually set prefetch tail read bytes to be small and run db bench.**
```
./db_bench -db=/tmp/testdb -statistics=true -benchmarks="fillseq" -key_size=32 -value_size=512 -num=5000 -write_buffer_size=655 -target_file_size_base=655 -disable_auto_compactions=false -compression_type=none -bloom_bits=3 -use_direct_reads=true
```
```
rocksdb.table.open.prefetch.tail.read.bytes P50 : 4096.000000 P95 : 4096.000000 P99 : 4096.000000 P100 : 4096.000000 COUNT : 225 SUM : 921600
rocksdb.table.open.prefetch.tail.miss COUNT : 91
rocksdb.table.open.prefetch.tail.hit COUNT : 1034
```
**- No perf regression observed in db_bench**
SETUP command: create same db with ~900 files for pre-change/post-change.
```
./db_bench -db=/tmp/testdb -benchmarks="fillseq" -key_size=32 -value_size=512 -num=500000 -write_buffer_size=655360 -disable_auto_compactions=true -target_file_size_base=16777216 -compression_type=none
```
TEST command 60 runs or til convergence: as suggested by anand1976 and akankshamahajan15, vary `seek_nexts` and `async_io` in testing.
```
./db_bench -use_existing_db=true -db=/tmp/testdb -statistics=false -cache_size=0 -cache_index_and_filter_blocks=false -benchmarks=seekrandom[-X60] -num=50000 -seek_nexts={10, 500, 1000} -async_io={0|1} -use_direct_reads=true
```
async io = 0, direct io read = true
| seek_nexts = 10, 30 runs | seek_nexts = 500, 12 runs | seek_nexts = 1000, 6 runs
-- | -- | -- | --
pre-post change | 4776 (± 28) ops/sec; 24.8 (± 0.1) MB/sec | 288 (± 1) ops/sec; 74.8 (± 0.4) MB/sec | 145 (± 4) ops/sec; 75.6 (± 2.2) MB/sec
post-change | 4790 (± 32) ops/sec; 24.9 (± 0.2) MB/sec | 288 (± 3) ops/sec; 74.7 (± 0.8) MB/sec | 143 (± 3) ops/sec; 74.5 (± 1.6) MB/sec
async io = 1, direct io read = true
| seek_nexts = 10, 54 runs | seek_nexts = 500, 6 runs | seek_nexts = 1000, 4 runs
-- | -- | -- | --
pre-post change | 3350 (± 36) ops/sec; 17.4 (± 0.2) MB/sec | 264 (± 0) ops/sec; 68.7 (± 0.2) MB/sec | 138 (± 1) ops/sec; 71.8 (± 1.0) MB/sec
post-change | 3358 (± 27) ops/sec; 17.4 (± 0.1) MB/sec | 263 (± 2) ops/sec; 68.3 (± 0.8) MB/sec | 139 (± 1) ops/sec; 72.6 (± 0.6) MB/sec
Reviewed By: ajkr
Differential Revision: D43781467
Pulled By: hx235
fbshipit-source-id: a706a18472a8edb2b952bac3af40eec803537f2a
2023-03-15 21:02:43 +00:00
|
|
|
if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && enable_) {
|
|
|
|
if (ret) {
|
|
|
|
RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_HIT);
|
|
|
|
} else {
|
|
|
|
RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_MISS);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool FilePrefetchBuffer::TryReadFromCacheUntracked(
|
|
|
|
const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
|
|
|
|
size_t n, Slice* result, Status* status,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
bool for_compaction /* = false */) {
|
2019-09-16 17:31:27 +00:00
|
|
|
if (track_min_offset_ && offset < min_offset_read_) {
|
|
|
|
min_offset_read_ = static_cast<size_t>(offset);
|
|
|
|
}
|
2022-03-21 14:12:43 +00:00
|
|
|
if (!enable_ || (offset < bufs_[curr_].offset_)) {
|
2019-09-16 17:31:27 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the buffer contains only a few of the requested bytes:
|
2021-02-18 22:29:36 +00:00
|
|
|
// If readahead is enabled: prefetch the remaining bytes + readahead bytes
|
2019-09-16 17:31:27 +00:00
|
|
|
// and satisfy the request.
|
|
|
|
// If readahead is not enabled: return false.
|
2021-12-01 06:52:14 +00:00
|
|
|
TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
|
|
|
|
&readahead_size_);
|
2022-03-21 14:12:43 +00:00
|
|
|
if (offset + n > bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize()) {
|
2019-09-16 17:31:27 +00:00
|
|
|
if (readahead_size_ > 0) {
|
2022-03-21 14:12:43 +00:00
|
|
|
Status s;
|
2021-11-20 01:52:42 +00:00
|
|
|
assert(reader != nullptr);
|
2019-09-16 17:31:27 +00:00
|
|
|
assert(max_readahead_size_ >= readahead_size_);
|
|
|
|
if (for_compaction) {
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
s = Prefetch(opts, reader, offset, std::max(n, readahead_size_));
|
2019-09-16 17:31:27 +00:00
|
|
|
} else {
|
2021-04-28 19:52:53 +00:00
|
|
|
if (implicit_auto_readahead_) {
|
2022-03-21 14:12:43 +00:00
|
|
|
if (!IsEligibleForPrefetch(offset, n)) {
|
2021-04-28 19:52:53 +00:00
|
|
|
// Ignore status as Prefetch is not called.
|
|
|
|
s.PermitUncheckedError();
|
|
|
|
return false;
|
|
|
|
}
|
2022-03-21 14:12:43 +00:00
|
|
|
}
|
2023-09-23 01:12:08 +00:00
|
|
|
size_t current_readahead_size = ReadAheadSizeTuning(offset, n);
|
|
|
|
s = Prefetch(opts, reader, offset, n + current_readahead_size);
|
2022-03-21 14:12:43 +00:00
|
|
|
}
|
|
|
|
if (!s.ok()) {
|
|
|
|
if (status) {
|
|
|
|
*status = s;
|
|
|
|
}
|
|
|
|
#ifndef NDEBUG
|
|
|
|
IGNORE_STATUS_IF_ERROR(s);
|
|
|
|
#endif
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);
|
|
|
|
|
|
|
|
uint64_t offset_in_buffer = offset - bufs_[curr_].offset_;
|
|
|
|
*result = Slice(bufs_[curr_].buffer_.BufferStart() + offset_in_buffer, n);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
bool FilePrefetchBuffer::TryReadFromCacheAsync(const IOOptions& opts,
|
|
|
|
RandomAccessFileReader* reader,
|
|
|
|
uint64_t offset, size_t n,
|
|
|
|
Slice* result, Status* status) {
|
|
|
|
bool ret =
|
|
|
|
TryReadFromCacheAsyncUntracked(opts, reader, offset, n, result, status);
|
Add new stat rocksdb.table.open.prefetch.tail.read.bytes, rocksdb.table.open.prefetch.tail.{miss|hit} (#11265)
Summary:
**Context/Summary:**
We are adding new stats to measure behavior of prefetched tail size and look up into this buffer
The stat collection is done in FilePrefetchBuffer but only for prefetched tail buffer during table open for now using FilePrefetchBuffer enum. It's cleaner than the alternative of implementing in upper-level call places of FilePrefetchBuffer for table open. It also has the benefit of extensible to other types of FilePrefetchBuffer if needed. See db bench for perf regression concern.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11265
Test Plan:
**- Piggyback on existing test**
**- rocksdb.table.open.prefetch.tail.miss is harder to UT so I manually set prefetch tail read bytes to be small and run db bench.**
```
./db_bench -db=/tmp/testdb -statistics=true -benchmarks="fillseq" -key_size=32 -value_size=512 -num=5000 -write_buffer_size=655 -target_file_size_base=655 -disable_auto_compactions=false -compression_type=none -bloom_bits=3 -use_direct_reads=true
```
```
rocksdb.table.open.prefetch.tail.read.bytes P50 : 4096.000000 P95 : 4096.000000 P99 : 4096.000000 P100 : 4096.000000 COUNT : 225 SUM : 921600
rocksdb.table.open.prefetch.tail.miss COUNT : 91
rocksdb.table.open.prefetch.tail.hit COUNT : 1034
```
**- No perf regression observed in db_bench**
SETUP command: create same db with ~900 files for pre-change/post-change.
```
./db_bench -db=/tmp/testdb -benchmarks="fillseq" -key_size=32 -value_size=512 -num=500000 -write_buffer_size=655360 -disable_auto_compactions=true -target_file_size_base=16777216 -compression_type=none
```
TEST command 60 runs or til convergence: as suggested by anand1976 and akankshamahajan15, vary `seek_nexts` and `async_io` in testing.
```
./db_bench -use_existing_db=true -db=/tmp/testdb -statistics=false -cache_size=0 -cache_index_and_filter_blocks=false -benchmarks=seekrandom[-X60] -num=50000 -seek_nexts={10, 500, 1000} -async_io={0|1} -use_direct_reads=true
```
async io = 0, direct io read = true
| seek_nexts = 10, 30 runs | seek_nexts = 500, 12 runs | seek_nexts = 1000, 6 runs
-- | -- | -- | --
pre-post change | 4776 (± 28) ops/sec; 24.8 (± 0.1) MB/sec | 288 (± 1) ops/sec; 74.8 (± 0.4) MB/sec | 145 (± 4) ops/sec; 75.6 (± 2.2) MB/sec
post-change | 4790 (± 32) ops/sec; 24.9 (± 0.2) MB/sec | 288 (± 3) ops/sec; 74.7 (± 0.8) MB/sec | 143 (± 3) ops/sec; 74.5 (± 1.6) MB/sec
async io = 1, direct io read = true
| seek_nexts = 10, 54 runs | seek_nexts = 500, 6 runs | seek_nexts = 1000, 4 runs
-- | -- | -- | --
pre-post change | 3350 (± 36) ops/sec; 17.4 (± 0.2) MB/sec | 264 (± 0) ops/sec; 68.7 (± 0.2) MB/sec | 138 (± 1) ops/sec; 71.8 (± 1.0) MB/sec
post-change | 3358 (± 27) ops/sec; 17.4 (± 0.1) MB/sec | 263 (± 2) ops/sec; 68.3 (± 0.8) MB/sec | 139 (± 1) ops/sec; 72.6 (± 0.6) MB/sec
Reviewed By: ajkr
Differential Revision: D43781467
Pulled By: hx235
fbshipit-source-id: a706a18472a8edb2b952bac3af40eec803537f2a
2023-03-15 21:02:43 +00:00
|
|
|
if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && enable_) {
|
|
|
|
if (ret) {
|
|
|
|
RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_HIT);
|
|
|
|
} else {
|
|
|
|
RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_MISS);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool FilePrefetchBuffer::TryReadFromCacheAsyncUntracked(
|
|
|
|
const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
size_t n, Slice* result, Status* status) {
|
2022-03-21 14:12:43 +00:00
|
|
|
if (track_min_offset_ && offset < min_offset_read_) {
|
|
|
|
min_offset_read_ = static_cast<size_t>(offset);
|
|
|
|
}
|
2022-05-20 23:09:33 +00:00
|
|
|
|
2022-05-23 19:15:26 +00:00
|
|
|
if (!enable_) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-09-13 00:42:01 +00:00
|
|
|
if (explicit_prefetch_submitted_) {
|
2022-11-01 23:06:51 +00:00
|
|
|
// explicit_prefetch_submitted_ is special case where it expects request
|
|
|
|
// submitted in PrefetchAsync should match with this request. Otherwise
|
|
|
|
// buffers will be outdated.
|
|
|
|
// Random offset called. So abort the IOs.
|
2022-11-11 21:34:49 +00:00
|
|
|
if (prev_offset_ != offset) {
|
2022-09-13 00:42:01 +00:00
|
|
|
AbortAllIOs();
|
|
|
|
bufs_[curr_].buffer_.Clear();
|
|
|
|
bufs_[curr_ ^ 1].buffer_.Clear();
|
|
|
|
explicit_prefetch_submitted_ = false;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!explicit_prefetch_submitted_ && offset < bufs_[curr_].offset_) {
|
2022-03-21 14:12:43 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool prefetched = false;
|
|
|
|
bool copy_to_third_buffer = false;
|
|
|
|
// If the buffer contains only a few of the requested bytes:
|
|
|
|
// If readahead is enabled: prefetch the remaining bytes + readahead bytes
|
|
|
|
// and satisfy the request.
|
|
|
|
// If readahead is not enabled: return false.
|
|
|
|
TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
|
|
|
|
&readahead_size_);
|
2022-09-13 00:42:01 +00:00
|
|
|
|
|
|
|
if (explicit_prefetch_submitted_ ||
|
|
|
|
(bufs_[curr_].async_read_in_progress_ ||
|
|
|
|
offset + n >
|
|
|
|
bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize())) {
|
2023-08-29 00:08:28 +00:00
|
|
|
// In case readahead_size is trimmed (=0), we still want to poll the data
|
|
|
|
// submitted with explicit_prefetch_submitted_=true.
|
|
|
|
if (readahead_size_ > 0 || explicit_prefetch_submitted_) {
|
2022-03-21 14:12:43 +00:00
|
|
|
Status s;
|
|
|
|
assert(reader != nullptr);
|
|
|
|
assert(max_readahead_size_ >= readahead_size_);
|
2022-05-23 19:15:26 +00:00
|
|
|
|
|
|
|
if (implicit_auto_readahead_) {
|
|
|
|
if (!IsEligibleForPrefetch(offset, n)) {
|
|
|
|
// Ignore status as Prefetch is not called.
|
|
|
|
s.PermitUncheckedError();
|
|
|
|
return false;
|
2022-03-21 14:12:43 +00:00
|
|
|
}
|
2019-09-16 17:31:27 +00:00
|
|
|
}
|
2023-08-18 22:52:04 +00:00
|
|
|
|
|
|
|
UpdateReadAheadSizeForUpperBound(offset, n);
|
|
|
|
|
2022-05-23 19:15:26 +00:00
|
|
|
// Prefetch n + readahead_size_/2 synchronously as remaining
|
|
|
|
// readahead_size_/2 will be prefetched asynchronously.
|
|
|
|
s = PrefetchAsyncInternal(opts, reader, offset, n, readahead_size_ / 2,
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
copy_to_third_buffer);
|
2022-09-13 00:42:01 +00:00
|
|
|
explicit_prefetch_submitted_ = false;
|
2019-09-16 17:31:27 +00:00
|
|
|
if (!s.ok()) {
|
2020-12-30 17:24:04 +00:00
|
|
|
if (status) {
|
|
|
|
*status = s;
|
|
|
|
}
|
2020-10-20 16:11:50 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
IGNORE_STATUS_IF_ERROR(s);
|
|
|
|
#endif
|
2019-09-16 17:31:27 +00:00
|
|
|
return false;
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
prefetched = explicit_prefetch_submitted_ ? false : true;
|
2019-09-16 17:31:27 +00:00
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2022-09-13 00:42:01 +00:00
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);
|
|
|
|
|
|
|
|
uint32_t index = curr_;
|
|
|
|
if (copy_to_third_buffer) {
|
|
|
|
index = 2;
|
|
|
|
}
|
|
|
|
uint64_t offset_in_buffer = offset - bufs_[index].offset_;
|
|
|
|
*result = Slice(bufs_[index].buffer_.BufferStart() + offset_in_buffer, n);
|
|
|
|
if (prefetched) {
|
|
|
|
readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
|
|
|
|
}
|
2019-09-16 17:31:27 +00:00
|
|
|
return true;
|
|
|
|
}
|
2022-03-21 14:12:43 +00:00
|
|
|
|
|
|
|
void FilePrefetchBuffer::PrefetchAsyncCallback(const FSReadRequest& req,
|
2022-09-13 00:42:01 +00:00
|
|
|
void* cb_arg) {
|
|
|
|
uint32_t index = *(static_cast<uint32_t*>(cb_arg));
|
Fix stress test failure in ReadAsync. (#9824)
Summary:
Fix stress test failure in ReadAsync by ignoring errors
injected during async read by FaultInjectionFS.
Failure:
```
WARNING: prefix_size is non-zero but memtablerep != prefix_hash
Didn't get expected error from MultiGet.
num_keys 14 Expected 1 errors, seen 0
Callstack that injected the fault
Injected error type = 32538
Message: error;
#0 ./db_stress() [0x6f7dd4] rocksdb::port::SaveStack(int*, int) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/port/stack_trace.cc:152
https://github.com/facebook/rocksdb/issues/1 ./db_stress() [0x7f2bda] rocksdb::FaultInjectionTestFS::InjectThreadSpecificReadError(rocksdb::FaultInjectionTestFS::ErrorOperation, rocksdb::Slice*, bool, char*, bool, bool*) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/utilities/fault_injection_fs.cc:891
https://github.com/facebook/rocksdb/issues/2 ./db_stress() [0x7f2e78] rocksdb::TestFSRandomAccessFile::Read(unsigned long, unsigned long, rocksdb::IOOptions const&, rocksdb::Slice*, char*, rocksdb::IODebugContext*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/utilities/fault_injection_fs.cc:367
https://github.com/facebook/rocksdb/issues/3 ./db_stress() [0x6483d7] rocksdb::(anonymous namespace)::CompositeRandomAccessFileWrapper::Read(unsigned long, unsigned long, rocksdb::Slice*, char*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/env/composite_env.cc:61
https://github.com/facebook/rocksdb/issues/4 ./db_stress() [0x654564] rocksdb::(anonymous namespace)::LegacyRandomAccessFileWrapper::Read(unsigned long, unsigned long, rocksdb::IOOptions const&, rocksdb::Slice*, char*, rocksdb::IODebugContext*) const /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/env/env.cc:152
https://github.com/facebook/rocksdb/issues/5 ./db_stress() [0x659b3b] rocksdb::FSRandomAccessFile::ReadAsync(rocksdb::FSReadRequest&, rocksdb::IOOptions const&, std::function<void (rocksdb::FSReadRequest const&, void*)>, void*, void**, std::function<void (void*)>*, rocksdb::IODebugContext*) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/./include/rocksdb/file_system.h:896
https://github.com/facebook/rocksdb/issues/6 ./db_stress() [0x8b8bab] rocksdb::RandomAccessFileReader::ReadAsync(rocksdb::FSReadRequest&, rocksdb::IOOptions const&, std::function<void (rocksdb::FSReadRequest const&, void*)>, void*, void**, std::function<void (void*)>*, rocksdb::Env::IOPriority) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/random_access_file_reader.cc:459
https://github.com/facebook/rocksdb/issues/7 ./db_stress() [0x8b501f] rocksdb::FilePrefetchBuffer::ReadAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, rocksdb::Env::IOPriority, unsigned long, unsigned long, unsigned long, unsigned int) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:124
https://github.com/facebook/rocksdb/issues/8 ./db_stress() [0x8b55fc] rocksdb::FilePrefetchBuffer::PrefetchAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, unsigned long, unsigned long, unsigned long, rocksdb::Env::IOPriority, bool&) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:363
https://github.com/facebook/rocksdb/issues/9 ./db_stress() [0x8b61f8] rocksdb::FilePrefetchBuffer::TryReadFromCacheAsync(rocksdb::IOOptions const&, rocksdb::RandomAccessFileReader*, unsigned long, unsigned long, rocksdb::Slice*, rocksdb::Status*, rocksdb::Env::IOPriority, bool) /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/file/file_prefetch_buffer.cc:482
https://github.com/facebook/rocksdb/issues/10 ./db_stress() [0x745e04] rocksdb::BlockFetcher::TryGetFromPrefetchBuffer() /data/sandcastle/boxes/trunk-hg-fbcode-fbsource/fbcode/internal_repo_rocksdb/repo/table/block_fetcher.cc:76
```
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9824
Test Plan:
```
./db_stress --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --allow_concurrent_memtable_write=0 --async_io=1 --atomic_flush=1 --avoid_flush_during_recovery=0 --avoid_unnecessary_blocking_io=0 -- backup_max_size=104857600 --backup_one_in=100000 --batch_protection_bytes_per_key=0 --block_size=16384 --bloom_bits=5.037629726741734 --bottommost_compression_type=lz4hc --cache_index_and_filter_blocks=0 --cache_size=8388608 --checkpoint_one_in=1000000 --checksum_type=kxxHash --clear_column_family_one_in=0 --column_families=1 --compact_files_one_in=1000000 --compact_range_one_in=1000000 --compaction_ttl=100 --compression_max_dict_buffer_bytes=1073741823 --compression_max_dict_bytes=16384 --compression_parallel_threads=1 --compression_type=zstd --compression_zstd_max_train_bytes=0 --continuous_verification_interval=0 --db=/home/akankshamahajan/dev/shm/rocksdb/rocksdb_crashtest_blackbox --db_write_buffer_size=8388608 --delpercent=0 --delrangepercent=0 --destroy_db_initially=0 - detect_filter_construct_corruption=1 --disable_wal=1 --enable_compaction_filter=0 --enable_pipelined_write=0 --expected_values_dir=/home/akankshamahajan/dev/shm/rocksdb/rocksdb_crashtest_expected --experimental_mempurge_threshold=8.772789063014715 --fail_if_options_file_error=0 --file_checksum_impl=crc32c --flush_one_in=1000000 --format_version=3 --get_current_wal_file_one_in=0 --get_live_files_one_in=1000000 --get_property_one_in=1000000 --get_sorted_wal_files_one_in=0 --index_block_restart_interval=15 --index_type=3 --iterpercent=0 --key_len_percent_dist=1,30,69 --level_compaction_dynamic_level_bytes=False --long_running_snapshots=0 --mark_for_compaction_one_file_in=0 --max_background_compactions=1 --max_bytes_for_level_base=67108864 --max_key=25000000 --max_key_len=3 --max_manifest_file_size=1073741824 --max_write_batch_group_size_bytes=16777216 --max_write_buffer_number=3 --max_write_buffer_size_to_maintain=2097152 --memtable_prefix_bloom_size_ratio=0.001 --memtable_whole_key_filtering=1 
--memtablerep=skip_list --mmap_read=0 --mock_direct_io=True --nooverwritepercent=1 --open_files=-1 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --ops_per_thread=100000000 --optimize_filters_for_memory=0 --paranoid_file_checks=1 --partition_filters=0 --partition_pinning=2 --pause_background_one_in=1000000 --periodic_compaction_seconds=1000 --prefix_size=-1 --prefixpercent=0 --prepopulate_block_cache=0 --progress_reports=0 --read_fault_one_in=32 --readpercent=100 --recycle_log_file_num=1 --reopen=0 --reserve_table_reader_memory=1 --ribbon_starting_level=999 --secondary_cache_fault_one_in=0 --set_options_one_in=0 --snapshot_hold_ops=100000 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --subcompactions=2 --sync=0 --sync_fault_injection=False --target_file_size_base=16777216 --target_file_size_multiplier=1 --test_batches_snapshots=0 --top_level_index_pinning=3 --unpartitioned_pinning=2 --use_block_based_filter=0 --use_clock_cache=0 --use_direct_io_for_flush_and_compaction=1 --use_direct_reads=0 --use_full_merge_v1=0 --use_merge=1 --use_multiget=1 --user_timestamp_size=0 --value_size_mult=32 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_db_one_in=100000 --wal_compression=none --write_buffer_size=33554432 --write_dbid_to_manifest=1 --write_fault_one_in=0 --writepercent=0
```
Reviewed By: anand1976
Differential Revision: D35514566
Pulled By: akankshamahajan15
fbshipit-source-id: e2a868fdd7422604774c1419738f9926a21e92a4
2022-04-11 17:56:11 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
if (req.result.size() < req.len) {
|
|
|
|
// Fake an IO error to force db_stress fault injection to ignore
|
|
|
|
// truncated read errors
|
|
|
|
IGNORE_STATUS_IF_ERROR(Status::IOError());
|
|
|
|
}
|
|
|
|
IGNORE_STATUS_IF_ERROR(req.status);
|
|
|
|
#endif
|
|
|
|
|
2022-03-21 14:12:43 +00:00
|
|
|
if (req.status.ok()) {
|
|
|
|
if (req.offset + req.result.size() <=
|
|
|
|
bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize()) {
|
2022-07-06 18:42:59 +00:00
|
|
|
// All requested bytes are already in the buffer or no data is read
|
|
|
|
// because of EOF. So no need to update.
|
2022-03-21 14:12:43 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (req.offset < bufs_[index].offset_) {
|
|
|
|
// Next block to be read has changed (Recent read was not a sequential
|
|
|
|
// read). So ignore this read.
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
size_t current_size = bufs_[index].buffer_.CurrentSize();
|
|
|
|
bufs_[index].buffer_.Size(current_size + req.result.size());
|
|
|
|
}
|
|
|
|
}
|
2022-05-20 23:09:33 +00:00
|
|
|
|
|
|
|
// Submits asynchronous prefetch reads for [offset, offset + n) and, when
// eligible, an extra readahead chunk into the second buffer.
//
// Behavior (as implemented below):
// - Returns Status::NotSupported() if the prefetch buffer is disabled.
// - If curr_ already holds all requested bytes, *result is set to point into
//   curr_'s buffer and Status::OK() is returned (possibly after also
//   scheduling readahead into the second buffer).
// - Otherwise pending async IOs are aborted, stale buffers are cleared, async
//   read(s) are issued, and Status::TryAgain() is returned to tell the caller
//   the data is not yet available.
//
// @param opts    IO options forwarded to ReadAsync.
// @param reader  File reader used for the async reads; must be non-null.
// @param offset  File offset of the requested data.
// @param n       Number of requested bytes.
// @param result  Out: set only when the data is already fully in curr_.
// @return OK if data was found in the buffer, TryAgain if async reads were
//         submitted (or nothing was read), NotSupported if disabled, or the
//         error from ReadAsync on submission failure.
Status FilePrefetchBuffer::PrefetchAsync(const IOOptions& opts,
                                         RandomAccessFileReader* reader,
                                         uint64_t offset, size_t n,
                                         Slice* result) {
  assert(reader != nullptr);
  if (!enable_) {
    return Status::NotSupported();
  }

  TEST_SYNC_POINT("FilePrefetchBuffer::PrefetchAsync:Start");

  // Explicit prefetch resets the implicit-readahead bookkeeping.
  num_file_reads_ = 0;
  explicit_prefetch_submitted_ = false;
  bool is_eligible_for_prefetching = false;

  UpdateReadAheadSizeForUpperBound(offset, n);
  if (readahead_size_ > 0 &&
      (!implicit_auto_readahead_ ||
       num_file_reads_ >= num_file_reads_for_auto_readahead_)) {
    is_eligible_for_prefetching = true;
  }

  // 1. Cancel any pending async read to make code simpler as buffers can be
  // out of sync.
  AbortAllIOs();

  // 2. Clear outdated data.
  UpdateBuffersIfNeeded(offset);
  uint32_t second = curr_ ^ 1;
  // Since PrefetchAsync can be called on non sequential reads. So offset can
  // be less than curr_ buffers' offset. In that case also it clears both
  // buffers.
  if (DoesBufferContainData(curr_) && !IsOffsetInBuffer(offset, curr_)) {
    bufs_[curr_].buffer_.Clear();
    bufs_[second].buffer_.Clear();
  }

  UpdateReadPattern(offset, n, /*decrease_readaheadsize=*/false);

  bool data_found = false;

  // 3. If curr_ has full data.
  if (DoesBufferContainData(curr_) && IsDataBlockInBuffer(offset, n, curr_)) {
    uint64_t offset_in_buffer = offset - bufs_[curr_].offset_;
    *result = Slice(bufs_[curr_].buffer_.BufferStart() + offset_in_buffer, n);
    data_found = true;
    // Update num_file_reads_ as TryReadFromCacheAsync won't be called for
    // poll and update num_file_reads_ if data is found.
    num_file_reads_++;

    // 3.1 If second also has some data or is not eligible for prefetching,
    // return.
    if (!is_eligible_for_prefetching || DoesBufferContainData(second)) {
      return Status::OK();
    }
  } else {
    // Partial data in curr_.
    bufs_[curr_].buffer_.Clear();
  }
  bufs_[second].buffer_.Clear();

  Status s;
  size_t alignment = reader->file()->GetRequiredBufferAlignment();
  size_t prefetch_size = is_eligible_for_prefetching ? readahead_size_ / 2 : 0;
  size_t offset_to_read = static_cast<size_t>(offset);
  uint64_t rounddown_start1 = 0;
  uint64_t roundup_end1 = 0;
  uint64_t rounddown_start2 = 0;
  uint64_t roundup_end2 = 0;
  uint64_t chunk_len1 = 0;
  uint64_t chunk_len2 = 0;
  size_t read_len1 = 0;
  size_t read_len2 = 0;

  // - If curr_ is empty.
  //   - Call async read for full data + prefetch_size on curr_.
  //   - Call async read for prefetch_size on second if eligible.
  // - If curr_ is filled.
  //   - prefetch_size on second.
  // Calculate length and offsets for reading.
  if (!DoesBufferContainData(curr_)) {
    uint64_t roundup_len1;
    // Prefetch full data + prefetch_size in curr_.
    if (is_eligible_for_prefetching || reader->use_direct_io()) {
      // Align the read window for direct IO / prefetching.
      rounddown_start1 = Rounddown(offset_to_read, alignment);
      roundup_end1 = Roundup(offset_to_read + n + prefetch_size, alignment);
      roundup_len1 = roundup_end1 - rounddown_start1;
      assert(roundup_len1 >= alignment);
      assert(roundup_len1 % alignment == 0);
    } else {
      // No alignment needed: read exactly the requested range.
      rounddown_start1 = offset_to_read;
      roundup_end1 = offset_to_read + n;
      roundup_len1 = roundup_end1 - rounddown_start1;
    }
    CalculateOffsetAndLen(alignment, rounddown_start1, roundup_len1, curr_,
                          false, chunk_len1);
    // curr_ was just cleared, so no existing chunk can be reused.
    assert(chunk_len1 == 0);
    assert(roundup_len1 >= chunk_len1);
    read_len1 = static_cast<size_t>(roundup_len1);
    bufs_[curr_].offset_ = rounddown_start1;
  }

  if (is_eligible_for_prefetching) {
    if (DoesBufferContainData(curr_)) {
      // Continue readahead right after the data already in curr_.
      rounddown_start2 =
          bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize();
    } else {
      rounddown_start2 = roundup_end1;
    }

    // Second buffer might be out of bound if first buffer already prefetched
    // that data.
    if (!IsOffsetOutOfBound(rounddown_start2)) {
      roundup_end2 = Roundup(rounddown_start2 + prefetch_size, alignment);
      uint64_t roundup_len2 = roundup_end2 - rounddown_start2;

      assert(roundup_len2 >= alignment);

      CalculateOffsetAndLen(alignment, rounddown_start2, roundup_len2, second,
                            false, chunk_len2);
      assert(chunk_len2 == 0);
      assert(roundup_len2 >= chunk_len2);
      read_len2 = static_cast<size_t>(roundup_len2 - chunk_len2);
      // Update the buffer offset.
      bufs_[second].offset_ = rounddown_start2;
    }
  }

  if (read_len1) {
    s = ReadAsync(opts, reader, read_len1, rounddown_start1, curr_);
    if (!s.ok()) {
      // Submission failed: drop the IO handle and any stale buffer content.
      DestroyAndClearIOHandle(curr_);
      bufs_[curr_].buffer_.Clear();
      return s;
    }
    explicit_prefetch_submitted_ = true;
    prev_len_ = 0;
  }
  if (read_len2) {
    TEST_SYNC_POINT("FilePrefetchBuffer::PrefetchAsync:ExtraPrefetching");
    s = ReadAsync(opts, reader, read_len2, rounddown_start2, second);
    if (!s.ok()) {
      DestroyAndClearIOHandle(second);
      bufs_[second].buffer_.Clear();
      return s;
    }
    // Grow readahead (capped) after a successful extra prefetch submission.
    readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
  }
  // TryAgain signals the caller to poll for the async result later.
  return (data_found ? Status::OK() : Status::TryAgain());
}
|
2022-09-13 00:42:01 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|