mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-30 13:41:46 +00:00
06e593376c
Summary:
## Context/Summary
Similar to https://github.com/facebook/rocksdb/pull/11288 and https://github.com/facebook/rocksdb/pull/11444, categorizing SST/blob file writes according to different IO activities allows more insight into each activity. To that end, this PR does the following:
- Tag different write IOs by passing down and converting WriteOptions to IOOptions
- Add a new SST_WRITE_MICROS histogram in WritableFileWriter::Append() and break it down into FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS

Some related code refactoring to make the implementation cleaner:
- Blob stats
  - Replace the high-level write measurement with the low-level WritableFileWriter::Append() measurement for BLOB_DB_BLOB_FILE_WRITE_MICROS. This makes FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS include blob file writes. As a consequence, this introduces some behavioral changes; see HISTORY and the db_bench test plan below for more info.
  - Fix bugs where BLOB_DB_BLOB_FILE_SYNCED/BLOB_DB_BLOB_FILE_BYTES_WRITTEN included files that failed to sync and bytes that failed to be written.
- Refactor the WriteOptions constructor for easier construction with io_activity and rate_limiter_priority
- Refactor DBImpl::~DBImpl()/BlobDBImpl::Close() to bypass thread op verification
- Build table
  - TableBuilderOptions now includes Read/WriteOptions so that BuildTable() does not need to take these two variables
  - Replace the io_priority passed into BuildTable() with TableBuilderOptions::WriteOptions::rate_limiter_priority; similarly for BlobFileBuilder. This parameter is used for dynamically changing file IO priority for flush; see https://github.com/facebook/rocksdb/pull/9988?fbclid=IwAR1DtKel6c-bRJAdesGo0jsbztRtciByNlvokbxkV6h_L-AE9MACzqRTT5s for more
- Update ThreadStatus::FLUSH_BYTES_WRITTEN to use io_activity instead of io_priority to track flush IO in the flush job and db open

## Test
### db bench
Flush
```
./db_bench --statistics=1 --benchmarks=fillseq --num=100000 --write_buffer_size=100

rocksdb.sst.write.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.flush.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.compaction.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.db.open.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
```
compaction, db open
```
Setup: ./db_bench --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

rocksdb.sst.write.micros P50 : 2.675325 P95 : 9.578788 P99 : 18.780000 P100 : 314.000000 COUNT : 638 SUM : 3279
rocksdb.file.write.flush.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.compaction.micros P50 : 2.757353 P95 : 9.610687 P99 : 19.316667 P100 : 314.000000 COUNT : 615 SUM : 3213
rocksdb.file.write.db.open.micros P50 : 2.055556 P95 : 3.925000 P99 : 9.000000 P100 : 9.000000 COUNT : 23 SUM : 66
```
blob stats - just to make sure they aren't broken by this PR
```
Integrated Blob DB

Setup: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 7.298246 P95 : 9.771930 P99 : 9.991813 P100 : 16.000000 COUNT : 235 SUM : 1600
rocksdb.blobdb.blob.file.synced COUNT : 1
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 2.000000 P95 : 2.829360 P99 : 2.993779 P100 : 9.000000 COUNT : 707 SUM : 1614
- COUNT is higher and values are smaller as it includes header and footer writes
- COUNT is 3X higher as each Append() counts as one post-PR, while pre-PR three Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 1 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842 (stays the same)
```
```
Stacked Blob DB

Run: ./db_bench --use_blob_db=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 12.808042 P95 : 19.674497 P99 : 28.539683 P100 : 51.000000 COUNT : 10000 SUM : 140876
rocksdb.blobdb.blob.file.synced COUNT : 8
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 1.657370 P95 : 2.952175 P99 : 3.877519 P100 : 24.000000 COUNT : 30001 SUM : 67924
- COUNT is higher and values are smaller as it includes header and footer writes
- COUNT is 3X higher as each Append() counts as one post-PR, while pre-PR three Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 8 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445 (stays the same)
```
### Rehearsal CI stress test
Trigger 3 full runs of all our CI stress tests.
### Performance
Flush
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=ManualFlush/key_num:524288/per_key_size:256 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark; enable_statistics = true

Pre-pr: avg 507515519.3 ns
497686074,499444327,500862543,501389862,502994471,503744435,504142123,504224056,505724198,506610393,506837742,506955122,507695561,507929036,508307733,508312691,508999120,509963561,510142147,510698091,510743096,510769317,510957074,511053311,511371367,511409911,511432960,511642385,511691964,511730908,

Post-pr: avg 511971266.5 ns, regressed 0.88%
502744835,506502498,507735420,507929724,508313335,509548582,509994942,510107257,510715603,511046955,511352639,511458478,512117521,512317380,512766303,512972652,513059586,513804934,513808980,514059409,514187369,514389494,514447762,514616464,514622882,514641763,514666265,514716377,514990179,515502408,
```
Compaction
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{pre|post}_pr --benchmark_filter=ManualCompaction/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark

Pre-pr: avg 495346098.30 ns
492118301,493203526,494201411,494336607,495269217,495404950,496402598,497012157,497358370,498153846

Post-pr: avg 504528077.20 ns, regressed 1.85%. "ManualCompaction" includes flush, so the isolated regression for compaction should be around 1.85% - 0.88% = 0.97%.
502465338,502485945,502541789,502909283,503438601,504143885,506113087,506629423,507160414,507393007
```
Put with WAL (in case passing WriteOptions slows down this path even without collecting SST write stats)
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark

Pre-pr: avg 3848.10 ns
3814,3838,3839,3848,3854,3854,3854,3860,3860,3860

Post-pr: avg 3874.20 ns, regressed 0.68%
3863,3867,3871,3874,3875,3877,3877,3877,3880,3881
```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11910

Reviewed By: ajkr

Differential Revision: D49788060

Pulled By: hx235

fbshipit-source-id: 79e73699cda5be3b66461687e5147c2484fc5eff
509 lines
18 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "table/plain/plain_table_key_coding.h"
|
|
|
|
#include <algorithm>
|
|
#include <string>
|
|
|
|
#include "db/dbformat.h"
|
|
#include "file/writable_file_writer.h"
|
|
#include "table/plain/plain_table_factory.h"
|
|
#include "table/plain/plain_table_reader.h"
|
|
|
|
namespace ROCKSDB_NAMESPACE {

enum PlainTableEntryType : unsigned char {
  kFullKey = 0,
  kPrefixFromPreviousKey = 1,
  kKeySuffix = 2,
};

namespace {

// Control byte:
// The first two bits indicate the type of entry. The remaining six bits hold
// an inlined size. If all six size bits are 1 (0x3F), overflow bytes are
// used: key_size - 0x3F is encoded as a varint32 following this byte.

const unsigned char kSizeInlineLimit = 0x3F;
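// Worked example (illustrative numbers, not from the original source):
// encoding a kKeySuffix entry (type 2) with key_size = 100 sets the control
// byte to (2 << 6) | 0x3F = 0xBF, since 100 >= 0x3F, then appends the
// overflow 100 - 0x3F = 37 as the one-byte varint32 0x25; EncodeSize() below
// writes {0xBF, 0x25} and returns 2. With key_size = 10, the size fits
// inline: the single byte (2 << 6) | 10 = 0x8A is written and 1 is returned.
// DecodeSize() reverses this: type = (byte & ~0x3F) >> 6, then the inline
// size, plus the varint32 overflow when the inline bits are all 1.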

// Return 0 for error
size_t EncodeSize(PlainTableEntryType type, uint32_t key_size,
                  char* out_buffer) {
  out_buffer[0] = type << 6;

  if (key_size < static_cast<uint32_t>(kSizeInlineLimit)) {
    // size inlined
    out_buffer[0] |= static_cast<char>(key_size);
    return 1;
  } else {
    out_buffer[0] |= kSizeInlineLimit;
    char* ptr = EncodeVarint32(out_buffer + 1, key_size - kSizeInlineLimit);
    return ptr - out_buffer;
  }
}
}  // namespace

// Fill bytes_read with number of bytes read.
inline Status PlainTableKeyDecoder::DecodeSize(uint32_t start_offset,
                                               PlainTableEntryType* entry_type,
                                               uint32_t* key_size,
                                               uint32_t* bytes_read) {
  Slice next_byte_slice;
  bool success = file_reader_.Read(start_offset, 1, &next_byte_slice);
  if (!success) {
    return file_reader_.status();
  }
  *entry_type = static_cast<PlainTableEntryType>(
      (static_cast<unsigned char>(next_byte_slice[0]) & ~kSizeInlineLimit) >>
      6);
  char inline_key_size = next_byte_slice[0] & kSizeInlineLimit;
  if (inline_key_size < kSizeInlineLimit) {
    *key_size = inline_key_size;
    *bytes_read = 1;
    return Status::OK();
  } else {
    uint32_t extra_size;
    uint32_t tmp_bytes_read;
    success = file_reader_.ReadVarint32(start_offset + 1, &extra_size,
                                        &tmp_bytes_read);
    if (!success) {
      return file_reader_.status();
    }
    assert(tmp_bytes_read > 0);
    *key_size = kSizeInlineLimit + extra_size;
    *bytes_read = tmp_bytes_read + 1;
    return Status::OK();
  }
}

IOStatus PlainTableKeyEncoder::AppendKey(const Slice& key,
                                         WritableFileWriter* file,
                                         uint64_t* offset, char* meta_bytes_buf,
                                         size_t* meta_bytes_buf_size) {
  ParsedInternalKey parsed_key;
  Status pik_status =
      ParseInternalKey(key, &parsed_key, false /* log_err_key */);  // TODO
  if (!pik_status.ok()) {
    return IOStatus::Corruption(pik_status.getState());
  }

  Slice key_to_write = key;  // Portion of internal key to write out.

  uint32_t user_key_size = static_cast<uint32_t>(key.size() - 8);
  const IOOptions opts;

  if (encoding_type_ == kPlain) {
    if (fixed_user_key_len_ == kPlainTableVariableLength) {
      // Write key length
      char key_size_buf[5];  // tmp buffer for key size as varint32
      char* ptr = EncodeVarint32(key_size_buf, user_key_size);
      assert(ptr <= key_size_buf + sizeof(key_size_buf));
      auto len = ptr - key_size_buf;
      IOStatus io_s = file->Append(opts, Slice(key_size_buf, len));
      if (!io_s.ok()) {
        return io_s;
      }
      *offset += len;
    }
  } else {
    assert(encoding_type_ == kPrefix);
    char size_bytes[12];
    size_t size_bytes_pos = 0;

    Slice prefix =
        prefix_extractor_->Transform(Slice(key.data(), user_key_size));
    if (key_count_for_prefix_ == 0 || prefix != pre_prefix_.GetUserKey() ||
        key_count_for_prefix_ % index_sparseness_ == 0) {
      key_count_for_prefix_ = 1;
      pre_prefix_.SetUserKey(prefix);
      size_bytes_pos += EncodeSize(kFullKey, user_key_size, size_bytes);
      IOStatus io_s = file->Append(opts, Slice(size_bytes, size_bytes_pos));
      if (!io_s.ok()) {
        return io_s;
      }
      *offset += size_bytes_pos;
    } else {
      key_count_for_prefix_++;
      if (key_count_for_prefix_ == 2) {
        // For second key within a prefix, need to encode prefix length
        size_bytes_pos +=
            EncodeSize(kPrefixFromPreviousKey,
                       static_cast<uint32_t>(pre_prefix_.GetUserKey().size()),
                       size_bytes + size_bytes_pos);
      }
      uint32_t prefix_len =
          static_cast<uint32_t>(pre_prefix_.GetUserKey().size());
      size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len,
                                   size_bytes + size_bytes_pos);
      IOStatus io_s = file->Append(opts, Slice(size_bytes, size_bytes_pos));
      if (!io_s.ok()) {
        return io_s;
      }
      *offset += size_bytes_pos;
      key_to_write = Slice(key.data() + prefix_len, key.size() - prefix_len);
    }
  }

  // Encode full key
  // For value size as varint32 (up to 5 bytes).
  // If the row is of value type with seqId 0, flush the special flag together
  // in this buffer to save one file append call, which takes 1 byte.
  if (parsed_key.sequence == 0 && parsed_key.type == kTypeValue) {
    IOStatus io_s = file->Append(
        opts, Slice(key_to_write.data(), key_to_write.size() - 8));
    if (!io_s.ok()) {
      return io_s;
    }
    *offset += key_to_write.size() - 8;
    meta_bytes_buf[*meta_bytes_buf_size] = PlainTableFactory::kValueTypeSeqId0;
    *meta_bytes_buf_size += 1;
  } else {
    IOStatus io_s = file->Append(opts, key_to_write);
    if (!io_s.ok()) {
      return io_s;
    }
    *offset += key_to_write.size();
  }

  return IOStatus::OK();
}

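// Illustration of the kPrefix layout written by AppendKey() above, using
// hypothetical user keys "abc1", "abc2", "abc3" that share prefix "abc"
// (prefix_len = 3, user_key_size = 4) and do not cross an index_sparseness
// boundary after the first key:
//   "abc1": EncodeSize(kFullKey, 4), then the full internal key
//   "abc2": EncodeSize(kPrefixFromPreviousKey, 3) and EncodeSize(kKeySuffix, 1)
//           in one Append(), then only the suffix "2" plus the 8-byte footer
//           (when sequence == 0 and the type is value, the footer is dropped
//           and the kValueTypeSeqId0 flag byte is handed back via
//           meta_bytes_buf instead)
//   "abc3": EncodeSize(kKeySuffix, 1), then the suffix "3" plus footer
// NextPrefixEncodingKey() below reconstructs full keys by caching the prefix
// of the last full key it saw.
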
Slice PlainTableFileReader::GetFromBuffer(Buffer* buffer, uint32_t file_offset,
                                          uint32_t len) {
  assert(file_offset + len <= file_info_->data_end_offset);
  return Slice(buffer->buf.get() + (file_offset - buffer->buf_start_offset),
               len);
}

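// A note on the read path below: ReadNonMmap() maintains a small set of
// reusable read buffers. It first scans the existing buffers, starting from
// the last slot, for one that already covers [file_offset, file_offset + len);
// on a miss it reads min(bytes to data end, max(kPrefetchSize, len)) bytes
// into a fresh buffer slot, or recycles the last slot once all are in use.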
bool PlainTableFileReader::ReadNonMmap(uint32_t file_offset, uint32_t len,
                                       Slice* out) {
  const uint32_t kPrefetchSize = 256u;

  // Try to read from buffers.
  for (uint32_t i = 0; i < num_buf_; i++) {
    Buffer* buffer = buffers_[num_buf_ - 1 - i].get();
    if (file_offset >= buffer->buf_start_offset &&
        file_offset + len <= buffer->buf_start_offset + buffer->buf_len) {
      *out = GetFromBuffer(buffer, file_offset, len);
      return true;
    }
  }

  Buffer* new_buffer;
  // The data needed is not in any of the buffers. Allocate a new buffer.
  if (num_buf_ < buffers_.size()) {
    // Add a new buffer
    new_buffer = new Buffer();
    buffers_[num_buf_++].reset(new_buffer);
  } else {
    // Now simply replace the last buffer. Can improve the placement policy
    // if needed.
    new_buffer = buffers_[num_buf_ - 1].get();
  }

  assert(file_offset + len <= file_info_->data_end_offset);
  uint32_t size_to_read = std::min(file_info_->data_end_offset - file_offset,
                                   std::max(kPrefetchSize, len));
  if (size_to_read > new_buffer->buf_capacity) {
    new_buffer->buf.reset(new char[size_to_read]);
    new_buffer->buf_capacity = size_to_read;
    new_buffer->buf_len = 0;
  }
  Slice read_result;
  // TODO: rate limit plain table reads.
  Status s =
      file_info_->file->Read(IOOptions(), file_offset, size_to_read,
                             &read_result, new_buffer->buf.get(), nullptr);
  if (!s.ok()) {
    status_ = s;
    return false;
  }
  new_buffer->buf_start_offset = file_offset;
  new_buffer->buf_len = size_to_read;
  *out = GetFromBuffer(new_buffer, file_offset, len);
  return true;
}

inline bool PlainTableFileReader::ReadVarint32(uint32_t offset, uint32_t* out,
                                               uint32_t* bytes_read) {
  if (file_info_->is_mmap_mode) {
    const char* start = file_info_->file_data.data() + offset;
    const char* limit =
        file_info_->file_data.data() + file_info_->data_end_offset;
    const char* key_ptr = GetVarint32Ptr(start, limit, out);
    assert(key_ptr != nullptr);
    *bytes_read = static_cast<uint32_t>(key_ptr - start);
    return true;
  } else {
    return ReadVarint32NonMmap(offset, out, bytes_read);
  }
}

bool PlainTableFileReader::ReadVarint32NonMmap(uint32_t offset, uint32_t* out,
                                               uint32_t* bytes_read) {
  const char* start;
  const char* limit;
  const uint32_t kMaxVarInt32Size = 6u;
  uint32_t bytes_to_read =
      std::min(file_info_->data_end_offset - offset, kMaxVarInt32Size);
  Slice bytes;
  if (!Read(offset, bytes_to_read, &bytes)) {
    return false;
  }
  start = bytes.data();
  limit = bytes.data() + bytes.size();

  const char* key_ptr = GetVarint32Ptr(start, limit, out);
  *bytes_read =
      (key_ptr != nullptr) ? static_cast<uint32_t>(key_ptr - start) : 0;
  return true;
}

Status PlainTableKeyDecoder::ReadInternalKey(
    uint32_t file_offset, uint32_t user_key_size, ParsedInternalKey* parsed_key,
    uint32_t* bytes_read, bool* internal_key_valid, Slice* internal_key) {
  Slice tmp_slice;
  bool success = file_reader_.Read(file_offset, user_key_size + 1, &tmp_slice);
  if (!success) {
    return file_reader_.status();
  }
  if (tmp_slice[user_key_size] == PlainTableFactory::kValueTypeSeqId0) {
    // Special encoding for the row with seqID=0
    parsed_key->user_key = Slice(tmp_slice.data(), user_key_size);
    parsed_key->sequence = 0;
    parsed_key->type = kTypeValue;
    *bytes_read += user_key_size + 1;
    *internal_key_valid = false;
  } else {
    success = file_reader_.Read(file_offset, user_key_size + 8, internal_key);
    if (!success) {
      return file_reader_.status();
    }
    *internal_key_valid = true;
    Status pik_status = ParseInternalKey(*internal_key, parsed_key,
                                         false /* log_err_key */);  // TODO
    if (!pik_status.ok()) {
      return Status::Corruption(
          Slice("Corrupted key found during next key read. "),
          pik_status.getState());
    }
    *bytes_read += user_key_size + 8;
  }
  return Status::OK();
}

Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset,
                                                  ParsedInternalKey* parsed_key,
                                                  Slice* internal_key,
                                                  uint32_t* bytes_read,
                                                  bool* /*seekable*/) {
  uint32_t user_key_size = 0;
  Status s;
  if (fixed_user_key_len_ != kPlainTableVariableLength) {
    user_key_size = fixed_user_key_len_;
  } else {
    uint32_t tmp_size = 0;
    uint32_t tmp_read;
    bool success =
        file_reader_.ReadVarint32(start_offset, &tmp_size, &tmp_read);
    if (!success) {
      return file_reader_.status();
    }
    assert(tmp_read > 0);
    user_key_size = tmp_size;
    *bytes_read = tmp_read;
  }
  // Dummy initial value to avoid a compiler complaint.
  bool decoded_internal_key_valid = true;
  Slice decoded_internal_key;
  s = ReadInternalKey(start_offset + *bytes_read, user_key_size, parsed_key,
                      bytes_read, &decoded_internal_key_valid,
                      &decoded_internal_key);
  if (!s.ok()) {
    return s;
  }
  if (!file_reader_.file_info()->is_mmap_mode) {
    cur_key_.SetInternalKey(*parsed_key);
    parsed_key->user_key =
        Slice(cur_key_.GetInternalKey().data(), user_key_size);
    if (internal_key != nullptr) {
      *internal_key = cur_key_.GetInternalKey();
    }
  } else if (internal_key != nullptr) {
    if (decoded_internal_key_valid) {
      *internal_key = decoded_internal_key;
    } else {
      // Need to copy out the internal key
      cur_key_.SetInternalKey(*parsed_key);
      *internal_key = cur_key_.GetInternalKey();
    }
  }
  return Status::OK();
}

Status PlainTableKeyDecoder::NextPrefixEncodingKey(
    uint32_t start_offset, ParsedInternalKey* parsed_key, Slice* internal_key,
    uint32_t* bytes_read, bool* seekable) {
  PlainTableEntryType entry_type;

  bool expect_suffix = false;
  Status s;
  do {
    uint32_t size = 0;
    // Dummy initial value to avoid a compiler complaint.
    bool decoded_internal_key_valid = true;
    uint32_t my_bytes_read = 0;
    s = DecodeSize(start_offset + *bytes_read, &entry_type, &size,
                   &my_bytes_read);
    if (!s.ok()) {
      return s;
    }
    if (my_bytes_read == 0) {
      return Status::Corruption("Unexpected EOF when reading size of the key");
    }
    *bytes_read += my_bytes_read;

    switch (entry_type) {
      case kFullKey: {
        expect_suffix = false;
        Slice decoded_internal_key;
        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
                            bytes_read, &decoded_internal_key_valid,
                            &decoded_internal_key);
        if (!s.ok()) {
          return s;
        }
        if (!file_reader_.file_info()->is_mmap_mode ||
            (internal_key != nullptr && !decoded_internal_key_valid)) {
          // In non-mmap mode, we always need to make a copy of keys returned
          // to users, because after reading the value for the key, the key
          // might become invalid.
          cur_key_.SetInternalKey(*parsed_key);
          saved_user_key_ = cur_key_.GetUserKey();
          if (!file_reader_.file_info()->is_mmap_mode) {
            parsed_key->user_key =
                Slice(cur_key_.GetInternalKey().data(), size);
          }
          if (internal_key != nullptr) {
            *internal_key = cur_key_.GetInternalKey();
          }
        } else {
          if (internal_key != nullptr) {
            *internal_key = decoded_internal_key;
          }
          saved_user_key_ = parsed_key->user_key;
        }
        break;
      }
      case kPrefixFromPreviousKey: {
        if (seekable != nullptr) {
          *seekable = false;
        }
        prefix_len_ = size;
        assert(prefix_extractor_ == nullptr ||
               prefix_extractor_->Transform(saved_user_key_).size() ==
                   prefix_len_);
        // Need to read another size flag for the suffix
        expect_suffix = true;
        break;
      }
      case kKeySuffix: {
        expect_suffix = false;
        if (seekable != nullptr) {
          *seekable = false;
        }

        Slice tmp_slice;
        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
                            bytes_read, &decoded_internal_key_valid,
                            &tmp_slice);
        if (!s.ok()) {
          return s;
        }
        if (!file_reader_.file_info()->is_mmap_mode) {
          // In non-mmap mode, we need to make a copy of keys returned to
          // users, because after reading the value for the key, the key
          // might become invalid.
          // saved_user_key_ points to cur_key_. We are making a copy of
          // the prefix part to another string, and construct the current
          // key from the prefix part and the suffix part back to cur_key_.
          std::string tmp =
              Slice(saved_user_key_.data(), prefix_len_).ToString();
          cur_key_.Reserve(prefix_len_ + size);
          cur_key_.SetInternalKey(tmp, *parsed_key);
          parsed_key->user_key =
              Slice(cur_key_.GetInternalKey().data(), prefix_len_ + size);
          saved_user_key_ = cur_key_.GetUserKey();
        } else {
          cur_key_.Reserve(prefix_len_ + size);
          cur_key_.SetInternalKey(Slice(saved_user_key_.data(), prefix_len_),
                                  *parsed_key);
        }
        parsed_key->user_key = cur_key_.GetUserKey();
        if (internal_key != nullptr) {
          *internal_key = cur_key_.GetInternalKey();
        }
        break;
      }
      default:
        return Status::Corruption("Un-identified size flag.");
    }
  } while (expect_suffix);  // Another round if suffix is expected.
  return Status::OK();
}

Status PlainTableKeyDecoder::NextKey(uint32_t start_offset,
                                     ParsedInternalKey* parsed_key,
                                     Slice* internal_key, Slice* value,
                                     uint32_t* bytes_read, bool* seekable) {
  assert(value != nullptr);
  Status s = NextKeyNoValue(start_offset, parsed_key, internal_key, bytes_read,
                            seekable);
  if (s.ok()) {
    assert(bytes_read != nullptr);
    uint32_t value_size;
    uint32_t value_size_bytes;
    bool success = file_reader_.ReadVarint32(start_offset + *bytes_read,
                                             &value_size, &value_size_bytes);
    if (!success) {
      return file_reader_.status();
    }
    if (value_size_bytes == 0) {
      return Status::Corruption(
          "Unexpected EOF when reading the next value's size.");
    }
    *bytes_read += value_size_bytes;
    success = file_reader_.Read(start_offset + *bytes_read, value_size, value);
    if (!success) {
      return file_reader_.status();
    }
    *bytes_read += value_size;
  }
  return s;
}

Status PlainTableKeyDecoder::NextKeyNoValue(uint32_t start_offset,
                                            ParsedInternalKey* parsed_key,
                                            Slice* internal_key,
                                            uint32_t* bytes_read,
                                            bool* seekable) {
  *bytes_read = 0;
  if (seekable != nullptr) {
    *seekable = true;
  }
  if (encoding_type_ == kPlain) {
    return NextPlainEncodingKey(start_offset, parsed_key, internal_key,
                                bytes_read, seekable);
  } else {
    assert(encoding_type_ == kPrefix);
    return NextPrefixEncodingKey(start_offset, parsed_key, internal_key,
                                 bytes_read, seekable);
  }
}

} // namespace ROCKSDB_NAMESPACE