Fix PREFETCH_BYTES_USEFUL stat calculation (#12251)

Summary:
After the refactoring of FilePrefetchBuffer, PREFETCH_BYTES_USEFUL was miscalculated. Instead of counting only how many of the requested bytes were already in the buffer, the stat also included alignment padding, because aligned_useful_len takes alignment into consideration.

Also renamed chunk_offset_in_buffer to aligned_useful_offset_in_buf so that its name is consistent with aligned_useful_len.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12251

Test Plan:
1. Validated internally through release validation benchmarks.
2. Updated unit test that fails without the fix.

Reviewed By: ajkr

Differential Revision: D52891112

Pulled By: akankshamahajan15

fbshipit-source-id: 2526a0b0572d473beaf8b841f2f9c2f6275d9779
This commit is contained in:
akankshamahajan 2024-01-18 19:09:49 -08:00 committed by Facebook GitHub Bot
parent 4835c11cce
commit b5bb553d5e
2 changed files with 30 additions and 19 deletions

View File

@ -27,7 +27,7 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
size_t roundup_len,
bool refit_tail,
uint64_t& aligned_useful_len) {
uint64_t chunk_offset_in_buffer = 0;
uint64_t aligned_useful_offset_in_buf = 0;
bool copy_data_to_new_buffer = false;
// Check if requested bytes are in the existing buffer_.
// If only a few bytes exist -- reuse them & read only what is really needed.
@ -37,19 +37,19 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
// Only a few requested bytes are in the buffer. memmove those chunk of
// bytes to the beginning, and memcpy them back into the new buffer if a
// new buffer is created.
chunk_offset_in_buffer =
aligned_useful_offset_in_buf =
Rounddown(static_cast<size_t>(offset - buf->offset_), alignment);
aligned_useful_len =
static_cast<uint64_t>(buf->CurrentSize()) - chunk_offset_in_buffer;
assert(chunk_offset_in_buffer % alignment == 0);
aligned_useful_len = static_cast<uint64_t>(buf->CurrentSize()) -
aligned_useful_offset_in_buf;
assert(aligned_useful_offset_in_buf % alignment == 0);
assert(aligned_useful_len % alignment == 0);
assert(chunk_offset_in_buffer + aligned_useful_len <=
assert(aligned_useful_offset_in_buf + aligned_useful_len <=
buf->offset_ + buf->CurrentSize());
if (aligned_useful_len > 0) {
copy_data_to_new_buffer = true;
} else {
// this reset is not necessary, but just to be safe.
chunk_offset_in_buffer = 0;
aligned_useful_offset_in_buf = 0;
}
}
@ -60,11 +60,11 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
buf->buffer_.Alignment(alignment);
buf->buffer_.AllocateNewBuffer(
static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
chunk_offset_in_buffer, static_cast<size_t>(aligned_useful_len));
aligned_useful_offset_in_buf, static_cast<size_t>(aligned_useful_len));
} else if (aligned_useful_len > 0 && refit_tail) {
// New buffer not needed. But memmove bytes from tail to the beginning since
// aligned_useful_len is greater than 0.
buf->buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer),
buf->buffer_.RefitTail(static_cast<size_t>(aligned_useful_offset_in_buf),
static_cast<size_t>(aligned_useful_len));
} else if (aligned_useful_len > 0) {
// For async prefetching, it doesn't call RefitTail with aligned_useful_len
@ -75,7 +75,7 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
buf->buffer_.Alignment(alignment);
buf->buffer_.AllocateNewBuffer(
static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
chunk_offset_in_buffer, static_cast<size_t>(aligned_useful_len));
aligned_useful_offset_in_buf, static_cast<size_t>(aligned_useful_len));
}
}
@ -470,11 +470,8 @@ Status FilePrefetchBuffer::HandleOverlappingData(
overlap_buf_->offset_ = offset;
copy_to_overlap_buffer = true;
size_t initial_buf_size = overlap_buf_->CurrentSize();
CopyDataToBuffer(buf, tmp_offset, tmp_length);
UpdateStats(
/*found_in_buffer=*/false,
overlap_buf_->CurrentSize() - initial_buf_size);
UpdateStats(/*found_in_buffer=*/false, overlap_buf_->CurrentSize());
// Call async prefetching on freed buffer since data has been consumed
// only if requested data lies within next buffer.
@ -635,11 +632,14 @@ Status FilePrefetchBuffer::PrefetchInternal(const IOOptions& opts,
// For length == 0, skip the synchronous prefetching. read_len1 will be 0.
if (length > 0) {
if (buf->IsOffsetInBuffer(offset)) {
UpdateStats(/*found_in_buffer=*/false,
(buf->offset_ + buf->CurrentSize() - offset));
}
ReadAheadSizeTuning(buf, /*read_curr_block=*/true, /*refit_tail*/
true, start_offset1, alignment, length, readahead_size,
start_offset1, end_offset1, read_len1,
aligned_useful_len1);
UpdateStats(/*found_in_buffer=*/false, aligned_useful_len1);
} else {
UpdateStats(/*found_in_buffer=*/true, original_length);
}

View File

@ -3108,8 +3108,8 @@ TEST_F(FilePrefetchBufferTest, SyncReadaheadStats) {
// Simulate a block cache hit
fpb.UpdateReadPattern(4096, 4096, false);
// Now read some data that'll prefetch additional data from 12288 to 16384
// (4096) + 8192 (readahead_size).
// Now read some data that'll prefetch additional data from 12288 to 24576.
// (8192) + 8192 (readahead_size).
ASSERT_TRUE(
fpb.TryReadFromCache(IOOptions(), r.get(), 8192, 8192, &result, &s));
ASSERT_EQ(s, Status::OK());
@ -3119,8 +3119,19 @@ TEST_F(FilePrefetchBufferTest, SyncReadaheadStats) {
ASSERT_TRUE(
fpb.TryReadFromCache(IOOptions(), r.get(), 12288, 4096, &result, &s));
ASSERT_EQ(s, Status::OK());
ASSERT_EQ(stats->getTickerCount(PREFETCH_HITS), 1);
ASSERT_EQ(stats->getTickerCount(PREFETCH_BYTES_USEFUL), 8192);
ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_HITS), 1);
ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_BYTES_USEFUL), 8192);
// Now read some data whose length doesn't align with the alignment and that
// needs prefetching. Read from 16000 with length 10000 (i.e. requested end
// offset = 26000).
ASSERT_TRUE(
fpb.TryReadFromCache(IOOptions(), r.get(), 16000, 10000, &result, &s));
ASSERT_EQ(s, Status::OK());
ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_HITS), 0);
ASSERT_EQ(
stats->getAndResetTickerCount(PREFETCH_BYTES_USEFUL),
/* 24576(end offset of the buffer) - 16000(requested offset) =*/8576);
}
} // namespace ROCKSDB_NAMESPACE