// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <algorithm>
#include <atomic>
#include <sstream>
#include <string>

#include "file/readahead_file_info.h"
#include "monitoring/statistics.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/options.h"
#include "util/aligned_buffer.h"
#include "util/autovector.h"
#include "util/stop_watch.h"

namespace ROCKSDB_NAMESPACE {

#define DEFAULT_DECREMENT 8 * 1024

struct IOOptions;
class RandomAccessFileReader;

struct BufferInfo {
  AlignedBuffer buffer_;

  uint64_t offset_ = 0;

  // Below parameters are used in case of async read flow.
  // Length requested for in ReadAsync.
  size_t async_req_len_ = 0;

  // async_read_in_progress can be used as mutex. Callback can update the
  // buffer and its size but async_read_in_progress is only set by main thread.
  bool async_read_in_progress_ = false;

  // io_handle is allocated and used by underlying file system in case of
  // asynchronous reads.
  void* io_handle_ = nullptr;

  IOHandleDeleter del_fn_ = nullptr;

  // pos represents the index of this buffer in vector of BufferInfo.
  uint32_t pos_ = 0;
};

enum class FilePrefetchBufferUsage {
  kTableOpenPrefetchTail,
  kUnknown,
};

// FilePrefetchBuffer is a smart buffer to store and read data from a file.
class FilePrefetchBuffer {
 public:
  // Constructor.
  //
  // All arguments are optional.
  // readahead_size : the initial readahead size.
  // max_readahead_size : the maximum readahead size.
  //   If max_readahead_size > readahead_size, the readahead size will be
  //   doubled on every IO until max_readahead_size is hit.
  //   Typically this is set as a multiple of readahead_size.
  //   max_readahead_size should be greater than or equal to readahead_size.
  // enable : controls whether reading from the buffer is enabled.
  //   If false, TryReadFromCache() always returns false, and we only take
  //   stats for the minimum offset if track_min_offset = true.
  // track_min_offset : Track the minimum offset ever read and collect stats on
  //   it. Used for adaptable readahead of the file footer/metadata.
  // implicit_auto_readahead : Readahead is enabled implicitly by RocksDB after
  //   doing sequential scans two times.
  //
  // Automatic readahead is enabled for a file if readahead_size
  // and max_readahead_size are passed in.
  // A user can construct a FilePrefetchBuffer without any arguments, but use
  // `Prefetch` to load data into the buffer.
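  // For example (illustrative numbers only, not defaults): with
  // readahead_size = 8KB and max_readahead_size = 256KB, successive automatic
  // prefetches grow as 8KB, 16KB, 32KB, ... and are capped at 256KB.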
  FilePrefetchBuffer(
      size_t readahead_size = 0, size_t max_readahead_size = 0,
      bool enable = true, bool track_min_offset = false,
      bool implicit_auto_readahead = false, uint64_t num_file_reads = 0,
      uint64_t num_file_reads_for_auto_readahead = 0, FileSystem* fs = nullptr,
      SystemClock* clock = nullptr, Statistics* stats = nullptr,
      FilePrefetchBufferUsage usage = FilePrefetchBufferUsage::kUnknown)
      : curr_(0),
        readahead_size_(readahead_size),
        initial_auto_readahead_size_(readahead_size),
        max_readahead_size_(max_readahead_size),
        min_offset_read_(std::numeric_limits<size_t>::max()),
        enable_(enable),
        track_min_offset_(track_min_offset),
        implicit_auto_readahead_(implicit_auto_readahead),
        prev_offset_(0),
        prev_len_(0),
        num_file_reads_for_auto_readahead_(num_file_reads_for_auto_readahead),
        num_file_reads_(num_file_reads),
        explicit_prefetch_submitted_(false),
        fs_(fs),
        clock_(clock),
        stats_(stats),
        usage_(usage) {
    assert((num_file_reads_ >= num_file_reads_for_auto_readahead_ + 1) ||
           (num_file_reads_ == 0));
    // If ReadOptions.async_io is enabled, data is asynchronously filled in
    // second buffer while curr_ is being consumed. If data is overlapping in
    // two buffers, data is copied to third buffer to return continuous buffer.
    bufs_.resize(3);
    for (uint32_t i = 0; i < 2; i++) {
      bufs_[i].pos_ = i;
    }
  }
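
  // Illustrative usage sketch (comments only, placeholder names such as
  // `opts`, `reader` and `footer_len`): a caller that wants explicit
  // prefetching can construct the buffer with readahead disabled and load a
  // region up front, e.g.
  //
  //   FilePrefetchBuffer prefetch_buffer;  // readahead_size = 0
  //   Status s = prefetch_buffer.Prefetch(opts, reader, /*offset=*/0,
  //                                       /*n=*/footer_len, Env::IO_TOTAL);
  //
  // Passing non-zero readahead_size/max_readahead_size instead enables the
  // automatic, exponentially growing readahead described above.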

  ~FilePrefetchBuffer() {
    // Abort any pending async read request before destroying the class object.
    if (fs_ != nullptr) {
      std::vector<void*> handles;
      for (uint32_t i = 0; i < 2; i++) {
        if (bufs_[i].async_read_in_progress_ &&
            bufs_[i].io_handle_ != nullptr) {
          handles.emplace_back(bufs_[i].io_handle_);
        }
      }
      if (!handles.empty()) {
        StopWatch sw(clock_, stats_, ASYNC_PREFETCH_ABORT_MICROS);
        Status s = fs_->AbortIO(handles);
        assert(s.ok());
      }
    }

    // Prefetch buffer bytes discarded.
    uint64_t bytes_discarded = 0;
    // Iterate over the 2 buffers.
    for (int i = 0; i < 2; i++) {
      int first = i;
      int second = i ^ 1;

      if (DoesBufferContainData(first)) {
        // If last block was read completely from first and some bytes in
        // first buffer are still unconsumed.
        if (prev_offset_ >= bufs_[first].offset_ &&
            prev_offset_ + prev_len_ <
                bufs_[first].offset_ + bufs_[first].buffer_.CurrentSize()) {
          bytes_discarded += bufs_[first].buffer_.CurrentSize() -
                             (prev_offset_ + prev_len_ - bufs_[first].offset_);
        }
        // If data was in second buffer and some/whole block bytes were read
        // from second buffer.
        else if (prev_offset_ < bufs_[first].offset_ &&
                 !DoesBufferContainData(second)) {
          // If last block read was completely from different buffer, this
          // buffer is unconsumed.
          if (prev_offset_ + prev_len_ <= bufs_[first].offset_) {
            bytes_discarded += bufs_[first].buffer_.CurrentSize();
          }
          // If last block read overlaps with this buffer and some data is
          // still unconsumed and previous buffer (second) is not cleared.
          else if (prev_offset_ + prev_len_ > bufs_[first].offset_ &&
                   bufs_[first].offset_ + bufs_[first].buffer_.CurrentSize() ==
                       bufs_[second].offset_) {
            bytes_discarded += bufs_[first].buffer_.CurrentSize() -
                               (/*bytes read from this buffer=*/prev_len_ -
                                (bufs_[first].offset_ - prev_offset_));
          }
        }
      }
    }

    for (uint32_t i = 0; i < 2; i++) {
      // Release io_handle.
      DestroyAndClearIOHandle(i);
    }
    RecordInHistogram(stats_, PREFETCHED_BYTES_DISCARDED, bytes_discarded);
  }
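
  // Worked example for the bytes_discarded accounting above (illustrative
  // numbers): if bufs_[first] holds file range [4096, 20480) (16KB) and the
  // last read consumed [4096, 8192), the first branch charges
  // 16384 - (8192 - 4096) = 12288 bytes as discarded.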

  // Load data into the buffer from a file.
  // reader : the file reader.
  // offset : the file offset to start reading from.
  // n : the number of bytes to read.
  // rate_limiter_priority : rate limiting priority, or `Env::IO_TOTAL` to
  //   bypass.
  Status Prefetch(const IOOptions& opts, RandomAccessFileReader* reader,
                  uint64_t offset, size_t n,
                  Env::IOPriority rate_limiter_priority);

  // Request for reading the data from a file asynchronously.
  // If data already exists in the buffer, result will be updated.
  // reader : the file reader.
  // offset : the file offset to start reading from.
  // n : the number of bytes to read.
  // result : if data already exists in the buffer, result will
  //          be updated with the data.
  //
  // If data already exists in the buffer, it will return Status::OK, otherwise
  // it will send an asynchronous request and return Status::TryAgain.
  Status PrefetchAsync(const IOOptions& opts, RandomAccessFileReader* reader,
                       uint64_t offset, size_t n, Slice* result);
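
  // Illustrative flow sketch (comments only, placeholder names): a caller
  // that wants the data at [off, off + len) can submit the request up front
  // and poll it on a later read, e.g.
  //
  //   Slice result;
  //   Status s = prefetch_buffer.PrefetchAsync(opts, reader, off, len, &result);
  //   if (s.IsTryAgain()) {
  //     // Request submitted; a later TryReadFromCacheAsync() call polls the
  //     // pending IO and serves the data once it completes.
  //   } else if (s.ok()) {
  //     // Data was already in the buffer and `result` points to it.
  //   }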

  // Tries returning the data for a file read from this buffer if that data is
  // in the buffer.
  // It handles tracking the minimum read offset if track_min_offset = true.
  // It also does the exponential readahead when readahead_size is set as part
  // of the constructor.
  //
  // opts : the IO options to use.
  // reader : the file reader.
  // offset : the file offset.
  // n : the number of bytes.
  // result : output buffer to put the data into.
  // s : output status.
  // rate_limiter_priority : rate limiting priority, or `Env::IO_TOTAL` to
  //   bypass.
  // for_compaction : true if cache read is done for compaction read.
  bool TryReadFromCache(const IOOptions& opts, RandomAccessFileReader* reader,
                        uint64_t offset, size_t n, Slice* result, Status* s,
                        Env::IOPriority rate_limiter_priority,
                        bool for_compaction = false);
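
  // Illustrative call pattern (comments only, placeholder names): callers
  // typically consult the buffer before issuing a file read, e.g.
  //
  //   Slice slice;
  //   Status s;
  //   if (prefetch_buffer.TryReadFromCache(opts, reader, off, len, &slice, &s,
  //                                        Env::IO_TOTAL)) {
  //     // `slice` points at the requested bytes; readahead may also have been
  //     // scheduled for subsequent sequential reads.
  //   } else {
  //     // Fall back to reading directly from the file.
  //   }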

  bool TryReadFromCacheAsync(const IOOptions& opts,
                             RandomAccessFileReader* reader, uint64_t offset,
                             size_t n, Slice* result, Status* status,
                             Env::IOPriority rate_limiter_priority);

  // The minimum `offset` ever passed to TryReadFromCache(). This will only be
  // tracked if track_min_offset = true.
  size_t min_offset_read() const { return min_offset_read_; }

  // Called in case of implicit auto prefetching.
  void UpdateReadPattern(const uint64_t& offset, const size_t& len,
                         bool decrease_readaheadsize) {
    if (decrease_readaheadsize) {
      // This block was eligible for prefetch but was found in cache, so check
      // and decrease the readahead_size by 8KB (default) if eligible.
      DecreaseReadAheadIfEligible(offset, len);
    }
    prev_offset_ = offset;
    prev_len_ = len;
    explicit_prefetch_submitted_ = false;
  }

  void GetReadaheadState(ReadaheadFileInfo::ReadaheadInfo* readahead_info) {
    readahead_info->readahead_size = readahead_size_;
    readahead_info->num_file_reads = num_file_reads_;
  }

  void DecreaseReadAheadIfEligible(uint64_t offset, size_t size,
                                   size_t value = DEFAULT_DECREMENT) {
    // Decrease the readahead_size if
    // - it's enabled internally by RocksDB (implicit_auto_readahead_) and,
    // - readahead_size is greater than 0 and,
    // - this block would have called prefetch API if not found in cache for
    //   which conditions are:
    //   - few/no bytes are in buffer and,
    //   - block is sequential with the previous read and,
    //   - num_file_reads_ + 1 (including this read) >
    //     num_file_reads_for_auto_readahead_
    size_t curr_size = bufs_[curr_].async_read_in_progress_
                           ? bufs_[curr_].async_req_len_
                           : bufs_[curr_].buffer_.CurrentSize();
    if (implicit_auto_readahead_ && readahead_size_ > 0) {
      if ((offset + size > bufs_[curr_].offset_ + curr_size) &&
          IsBlockSequential(offset) &&
          (num_file_reads_ + 1 > num_file_reads_for_auto_readahead_)) {
        readahead_size_ =
            std::max(initial_auto_readahead_size_,
                     (readahead_size_ >= value ? readahead_size_ - value : 0));
      }
    }
  }
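
  // Worked example for the decrement above (illustrative numbers): with
  // initial_auto_readahead_size_ = 8KB, readahead_size_ = 32KB and the default
  // 8KB decrement, an eligible cache hit shrinks readahead_size_ to
  // max(8KB, 32KB - 8KB) = 24KB; repeated hits keep shrinking it until it
  // bottoms out at the 8KB initial size.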

  // Callback function passed to underlying FS in case of asynchronous reads.
  void PrefetchAsyncCallback(const FSReadRequest& req, void* cb_arg);

 private:
  // Calculates roundoff offset and length to be prefetched based on alignment
  // and data present in buffer_. It also allocates a new buffer or refits the
  // tail if required.
  void CalculateOffsetAndLen(size_t alignment, uint64_t offset,
                             size_t roundup_len, uint32_t index,
                             bool refit_tail, uint64_t& chunk_len);

  void AbortIOIfNeeded(uint64_t offset);

  void AbortAllIOs();

  void UpdateBuffersIfNeeded(uint64_t offset);

  // It calls the Poll API if there is any pending asynchronous request. It
  // then checks if data is in any buffer. It clears the outdated data and
  // swaps the buffers if required.
  void PollAndUpdateBuffersIfNeeded(uint64_t offset);

  Status PrefetchAsyncInternal(const IOOptions& opts,
                               RandomAccessFileReader* reader, uint64_t offset,
                               size_t length, size_t readahead_size,
                               Env::IOPriority rate_limiter_priority,
                               bool& copy_to_third_buffer);

  Status Read(const IOOptions& opts, RandomAccessFileReader* reader,
              Env::IOPriority rate_limiter_priority, uint64_t read_len,
              uint64_t chunk_len, uint64_t rounddown_start, uint32_t index);

  Status ReadAsync(const IOOptions& opts, RandomAccessFileReader* reader,
                   uint64_t read_len, uint64_t rounddown_start, uint32_t index);

  // Copy the data from src to third buffer.
  void CopyDataToBuffer(uint32_t src, uint64_t& offset, size_t& length);

  bool IsBlockSequential(const size_t& offset) {
    return (prev_len_ == 0 || (prev_offset_ + prev_len_ == offset));
  }

  // Called in case of implicit auto prefetching.
  void ResetValues() {
    num_file_reads_ = 1;
    readahead_size_ = initial_auto_readahead_size_;
  }

  // Called in case of implicit auto prefetching.
  bool IsEligibleForPrefetch(uint64_t offset, size_t n) {
    // Prefetch only if this read is sequential; otherwise reset
    // readahead_size_ to its initial value.
    if (!IsBlockSequential(offset)) {
      UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);
      ResetValues();
      return false;
    }
    num_file_reads_++;

    // Since an async request was submitted directly in the last call to
    // PrefetchAsync, skip the num_file_reads_ check, as this call is only to
    // poll the data submitted in the previous call.
    if (explicit_prefetch_submitted_) {
      return true;
    }
    if (num_file_reads_ <= num_file_reads_for_auto_readahead_) {
      UpdateReadPattern(offset, n, false /*decrease_readaheadsize*/);
      return false;
    }
    return true;
  }

  // Helper functions.
  bool IsDataBlockInBuffer(uint64_t offset, size_t length, uint32_t index) {
    return (offset >= bufs_[index].offset_ &&
            offset + length <=
                bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize());
  }
  bool IsOffsetInBuffer(uint64_t offset, uint32_t index) {
    return (offset >= bufs_[index].offset_ &&
            offset < bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize());
  }
  bool DoesBufferContainData(uint32_t index) {
    return bufs_[index].buffer_.CurrentSize() > 0;
  }
  bool IsBufferOutdated(uint64_t offset, uint32_t index) {
    return (
        !bufs_[index].async_read_in_progress_ && DoesBufferContainData(index) &&
        offset >= bufs_[index].offset_ + bufs_[index].buffer_.CurrentSize());
  }
  bool IsBufferOutdatedWithAsyncProgress(uint64_t offset, uint32_t index) {
    return (bufs_[index].async_read_in_progress_ &&
            bufs_[index].io_handle_ != nullptr &&
            offset >= bufs_[index].offset_ + bufs_[index].async_req_len_);
  }
  bool IsOffsetInBufferWithAsyncProgress(uint64_t offset, uint32_t index) {
    return (bufs_[index].async_read_in_progress_ &&
            offset >= bufs_[index].offset_ &&
            offset < bufs_[index].offset_ + bufs_[index].async_req_len_);
  }

  bool IsSecondBuffEligibleForPrefetching() {
    uint32_t second = curr_ ^ 1;
    if (bufs_[second].async_read_in_progress_) {
      return false;
    }
    assert(!bufs_[curr_].async_read_in_progress_);

    if (DoesBufferContainData(curr_) && DoesBufferContainData(second) &&
        (bufs_[curr_].offset_ + bufs_[curr_].buffer_.CurrentSize() ==
         bufs_[second].offset_)) {
      return false;
    }
    bufs_[second].buffer_.Clear();
    return true;
  }

  void DestroyAndClearIOHandle(uint32_t index) {
    if (bufs_[index].io_handle_ != nullptr && bufs_[index].del_fn_ != nullptr) {
      bufs_[index].del_fn_(bufs_[index].io_handle_);
      bufs_[index].io_handle_ = nullptr;
      bufs_[index].del_fn_ = nullptr;
    }
    bufs_[index].async_read_in_progress_ = false;
  }

  Status HandleOverlappingData(const IOOptions& opts,
                               RandomAccessFileReader* reader, uint64_t offset,
                               size_t length, size_t readahead_size,
                               Env::IOPriority rate_limiter_priority,
                               bool& copy_to_third_buffer, uint64_t& tmp_offset,
                               size_t& tmp_length);

  bool TryReadFromCacheUntracked(const IOOptions& opts,
                                 RandomAccessFileReader* reader,
                                 uint64_t offset, size_t n, Slice* result,
                                 Status* s,
                                 Env::IOPriority rate_limiter_priority,
                                 bool for_compaction = false);

  bool TryReadFromCacheAsyncUntracked(const IOOptions& opts,
                                      RandomAccessFileReader* reader,
                                      uint64_t offset, size_t n, Slice* result,
                                      Status* status,
                                      Env::IOPriority rate_limiter_priority);

  std::vector<BufferInfo> bufs_;
  // curr_ represents the index for bufs_ indicating which buffer is being
  // consumed currently.
  uint32_t curr_;

  size_t readahead_size_;
  size_t initial_auto_readahead_size_;
  // FilePrefetchBuffer object won't be created from Iterator flow if
  // max_readahead_size_ = 0.
  size_t max_readahead_size_;

  // The minimum `offset` ever passed to TryReadFromCache().
  size_t min_offset_read_;
  // If false, TryReadFromCache() always returns false, and we only take stats
  // for track_min_offset_ if track_min_offset_ = true.
  bool enable_;
  // If true, track minimum `offset` ever passed to TryReadFromCache(), which
  // can be fetched from min_offset_read().
  bool track_min_offset_;

  // implicit_auto_readahead is enabled by rocksdb internally after 2
  // sequential IOs.
  bool implicit_auto_readahead_;
  uint64_t prev_offset_;
  size_t prev_len_;
  // num_file_reads_ and num_file_reads_for_auto_readahead_ are only used when
  // implicit_auto_readahead_ is set.
  uint64_t num_file_reads_for_auto_readahead_;
  uint64_t num_file_reads_;

  // If explicit_prefetch_submitted_ is set then it indicates RocksDB called
  // PrefetchAsync to submit a request. It needs to call TryReadFromCacheAsync
  // to poll the submitted request without checking if data is sequential and
  // num_file_reads_.
  bool explicit_prefetch_submitted_;

  FileSystem* fs_;
  SystemClock* clock_;
  Statistics* stats_;

  FilePrefetchBufferUsage usage_;
};

}  // namespace ROCKSDB_NAMESPACE