2017-12-11 23:16:37 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "table/block_fetcher.h"
|
|
|
|
|
|
|
|
#include <string>
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
|
|
|
#include "monitoring/perf_context_imp.h"
|
|
|
|
#include "monitoring/statistics.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "table/block.h"
|
|
|
|
#include "table/block_based_table_reader.h"
|
|
|
|
#include "table/format.h"
|
2018-10-03 00:21:54 +00:00
|
|
|
#include "table/persistent_cache_helper.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/compression.h"
|
|
|
|
#include "util/crc32c.h"
|
|
|
|
#include "util/file_reader_writer.h"
|
|
|
|
#include "util/logging.h"
|
2018-10-26 21:27:09 +00:00
|
|
|
#include "util/memory_allocator.h"
|
2017-12-11 23:16:37 +00:00
|
|
|
#include "util/stop_watch.h"
|
|
|
|
#include "util/string_util.h"
|
|
|
|
#include "util/xxhash.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
void BlockFetcher::CheckBlockChecksum() {
|
|
|
|
// Check the crc of the type and the block contents
|
|
|
|
if (read_options_.verify_checksums) {
|
|
|
|
const char* data = slice_.data(); // Pointer to where Read put the data
|
|
|
|
PERF_TIMER_GUARD(block_checksum_time);
|
|
|
|
uint32_t value = DecodeFixed32(data + block_size_ + 1);
|
|
|
|
uint32_t actual = 0;
|
|
|
|
switch (footer_.checksum()) {
|
|
|
|
case kNoChecksum:
|
|
|
|
break;
|
|
|
|
case kCRC32c:
|
|
|
|
value = crc32c::Unmask(value);
|
|
|
|
actual = crc32c::Value(data, block_size_ + 1);
|
|
|
|
break;
|
|
|
|
case kxxHash:
|
|
|
|
actual = XXH32(data, static_cast<int>(block_size_) + 1, 0);
|
|
|
|
break;
|
2018-11-01 22:39:40 +00:00
|
|
|
case kxxHash64:
|
|
|
|
actual =static_cast<uint32_t> (
|
|
|
|
XXH64(data, static_cast<int>(block_size_) + 1, 0) &
|
|
|
|
uint64_t{0xffffffff}
|
|
|
|
);
|
|
|
|
break;
|
2017-12-11 23:16:37 +00:00
|
|
|
default:
|
|
|
|
status_ = Status::Corruption(
|
|
|
|
"unknown checksum type " + ToString(footer_.checksum()) + " in " +
|
|
|
|
file_->file_name() + " offset " + ToString(handle_.offset()) +
|
|
|
|
" size " + ToString(block_size_));
|
|
|
|
}
|
|
|
|
if (status_.ok() && actual != value) {
|
|
|
|
status_ = Status::Corruption(
|
|
|
|
"block checksum mismatch: expected " + ToString(actual) + ", got " +
|
|
|
|
ToString(value) + " in " + file_->file_name() + " offset " +
|
|
|
|
ToString(handle_.offset()) + " size " + ToString(block_size_));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
bool BlockFetcher::TryGetUncompressBlockFromPersistentCache() {
|
|
|
|
if (cache_options_.persistent_cache &&
|
|
|
|
!cache_options_.persistent_cache->IsCompressed()) {
|
|
|
|
Status status = PersistentCacheHelper::LookupUncompressedPage(
|
|
|
|
cache_options_, handle_, contents_);
|
|
|
|
if (status.ok()) {
|
|
|
|
// uncompressed page is found for the block handle
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
// uncompressed page is not found
|
|
|
|
if (ioptions_.info_log && !status.IsNotFound()) {
|
|
|
|
assert(!status.ok());
|
|
|
|
ROCKS_LOG_INFO(ioptions_.info_log,
|
|
|
|
"Error reading from persistent cache. %s",
|
|
|
|
status.ToString().c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
bool BlockFetcher::TryGetFromPrefetchBuffer() {
|
|
|
|
if (prefetch_buffer_ != nullptr &&
|
|
|
|
prefetch_buffer_->TryReadFromCache(
|
|
|
|
handle_.offset(),
|
|
|
|
static_cast<size_t>(handle_.size()) + kBlockTrailerSize, &slice_)) {
|
|
|
|
block_size_ = static_cast<size_t>(handle_.size());
|
|
|
|
CheckBlockChecksum();
|
|
|
|
if (!status_.ok()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
got_from_prefetch_buffer_ = true;
|
|
|
|
used_buf_ = const_cast<char*>(slice_.data());
|
|
|
|
}
|
|
|
|
return got_from_prefetch_buffer_;
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
bool BlockFetcher::TryGetCompressedBlockFromPersistentCache() {
|
|
|
|
if (cache_options_.persistent_cache &&
|
|
|
|
cache_options_.persistent_cache->IsCompressed()) {
|
|
|
|
// lookup uncompressed cache mode p-cache
|
2018-10-03 00:21:54 +00:00
|
|
|
std::unique_ptr<char[]> raw_data;
|
2017-12-11 23:16:37 +00:00
|
|
|
status_ = PersistentCacheHelper::LookupRawPage(
|
2018-10-03 00:21:54 +00:00
|
|
|
cache_options_, handle_, &raw_data, block_size_ + kBlockTrailerSize);
|
2017-12-11 23:16:37 +00:00
|
|
|
if (status_.ok()) {
|
2018-10-03 00:21:54 +00:00
|
|
|
heap_buf_ = CacheAllocationPtr(raw_data.release());
|
2017-12-11 23:16:37 +00:00
|
|
|
used_buf_ = heap_buf_.get();
|
|
|
|
slice_ = Slice(heap_buf_.get(), block_size_);
|
|
|
|
return true;
|
|
|
|
} else if (!status_.IsNotFound() && ioptions_.info_log) {
|
|
|
|
assert(!status_.ok());
|
|
|
|
ROCKS_LOG_INFO(ioptions_.info_log,
|
|
|
|
"Error reading from persistent cache. %s",
|
|
|
|
status_.ToString().c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
void BlockFetcher::PrepareBufferForBlockFromFile() {
|
|
|
|
// cache miss read from device
|
|
|
|
if (do_uncompress_ &&
|
|
|
|
block_size_ + kBlockTrailerSize < kDefaultStackBufferSize) {
|
|
|
|
// If we've got a small enough hunk of data, read it in to the
|
|
|
|
// trivially allocated stack buffer instead of needing a full malloc()
|
|
|
|
used_buf_ = &stack_buf_[0];
|
2018-11-29 01:58:08 +00:00
|
|
|
} else if (maybe_compressed_ && !do_uncompress_) {
|
|
|
|
compressed_buf_ = AllocateBlock(block_size_ + kBlockTrailerSize,
|
|
|
|
memory_allocator_compressed_);
|
|
|
|
used_buf_ = compressed_buf_.get();
|
2017-12-11 23:16:37 +00:00
|
|
|
} else {
|
2018-11-29 01:58:08 +00:00
|
|
|
heap_buf_ =
|
|
|
|
AllocateBlock(block_size_ + kBlockTrailerSize, memory_allocator_);
|
2017-12-11 23:16:37 +00:00
|
|
|
used_buf_ = heap_buf_.get();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
void BlockFetcher::InsertCompressedBlockToPersistentCacheIfNeeded() {
|
|
|
|
if (status_.ok() && read_options_.fill_cache &&
|
|
|
|
cache_options_.persistent_cache &&
|
|
|
|
cache_options_.persistent_cache->IsCompressed()) {
|
|
|
|
// insert to raw cache
|
|
|
|
PersistentCacheHelper::InsertRawPage(cache_options_, handle_, used_buf_,
|
|
|
|
block_size_ + kBlockTrailerSize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
void BlockFetcher::InsertUncompressedBlockToPersistentCacheIfNeeded() {
|
|
|
|
if (status_.ok() && !got_from_prefetch_buffer_ && read_options_.fill_cache &&
|
|
|
|
cache_options_.persistent_cache &&
|
|
|
|
!cache_options_.persistent_cache->IsCompressed()) {
|
|
|
|
// insert to uncompressed cache
|
|
|
|
PersistentCacheHelper::InsertUncompressedPage(cache_options_, handle_,
|
|
|
|
*contents_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-29 01:58:08 +00:00
|
|
|
inline void BlockFetcher::CopyBufferToHeap() {
  // Duplicate the block (payload + trailer) from whichever buffer it was read
  // into (stack, prefetch, or compressed) into a freshly allocated heap_buf_.
  assert(used_buf_ != heap_buf_.get());
  const size_t copy_size = block_size_ + kBlockTrailerSize;
  heap_buf_ = AllocateBlock(copy_size, memory_allocator_);
  memcpy(heap_buf_.get(), used_buf_, copy_size);
}
|
|
|
|
|
2018-03-23 20:16:37 +00:00
|
|
|
inline
|
2017-12-11 23:16:37 +00:00
|
|
|
void BlockFetcher::GetBlockContents() {
|
|
|
|
if (slice_.data() != used_buf_) {
|
|
|
|
// the slice content is not the buffer provided
|
2018-11-14 01:00:49 +00:00
|
|
|
*contents_ = BlockContents(Slice(slice_.data(), block_size_));
|
2017-12-11 23:16:37 +00:00
|
|
|
} else {
|
2018-07-06 20:09:57 +00:00
|
|
|
// page can be either uncompressed or compressed, the buffer either stack
|
|
|
|
// or heap provided. Refer to https://github.com/facebook/rocksdb/pull/4096
|
2017-12-11 23:16:37 +00:00
|
|
|
if (got_from_prefetch_buffer_ || used_buf_ == &stack_buf_[0]) {
|
2018-11-29 01:58:08 +00:00
|
|
|
CopyBufferToHeap();
|
|
|
|
} else if (used_buf_ == compressed_buf_.get()) {
|
|
|
|
if (compression_type_ == kNoCompression &&
|
|
|
|
memory_allocator_ != memory_allocator_compressed_) {
|
|
|
|
CopyBufferToHeap();
|
|
|
|
} else {
|
|
|
|
heap_buf_ = std::move(compressed_buf_);
|
|
|
|
}
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
2018-11-14 01:00:49 +00:00
|
|
|
*contents_ = BlockContents(std::move(heap_buf_), block_size_);
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
2018-11-14 01:00:49 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
contents_->is_raw_block = true;
|
|
|
|
#endif
|
2017-12-11 23:16:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the block identified by handle_ into *contents_, trying (in order)
// the uncompressed persistent cache, the prefetch buffer, the compressed
// persistent cache, and finally the file itself; optionally uncompress.
// Returns the final status; partial results are tracked in member state.
// Fix: removed the local `Status s;` which was declared but never used —
// every result flows through the member status_.
Status BlockFetcher::ReadBlockContents() {
  block_size_ = static_cast<size_t>(handle_.size());

  if (TryGetUncompressBlockFromPersistentCache()) {
    // Cache stores uncompressed pages, so no decompression is needed.
    compression_type_ = kNoCompression;
#ifndef NDEBUG
    contents_->is_raw_block = true;
#endif  // NDEBUG
    return Status::OK();
  }
  if (TryGetFromPrefetchBuffer()) {
    // true with a bad status_ means the checksum check failed in the
    // prefetch path; surface that error.
    if (!status_.ok()) {
      return status_;
    }
  } else if (!TryGetCompressedBlockFromPersistentCache()) {
    PrepareBufferForBlockFromFile();
    {
      PERF_TIMER_GUARD(block_read_time);
      // Actual file read
      status_ = file_->Read(handle_.offset(), block_size_ + kBlockTrailerSize,
                            &slice_, used_buf_);
    }
    PERF_COUNTER_ADD(block_read_count, 1);
    PERF_COUNTER_ADD(block_read_byte, block_size_ + kBlockTrailerSize);
    if (!status_.ok()) {
      return status_;
    }

    // A short read means the file is truncated or the handle is bad.
    if (slice_.size() != block_size_ + kBlockTrailerSize) {
      return Status::Corruption("truncated block read from " +
                                file_->file_name() + " offset " +
                                ToString(handle_.offset()) + ", expected " +
                                ToString(block_size_ + kBlockTrailerSize) +
                                " bytes, got " + ToString(slice_.size()));
    }

    CheckBlockChecksum();
    if (status_.ok()) {
      InsertCompressedBlockToPersistentCacheIfNeeded();
    } else {
      return status_;
    }
  }

  PERF_TIMER_GUARD(block_decompress_time);

  // The compression type is encoded in the byte following the payload.
  compression_type_ = get_block_compression_type(slice_.data(), block_size_);

  if (do_uncompress_ && compression_type_ != kNoCompression) {
    // compressed page, uncompress, update cache
    UncompressionContext context(compression_type_);
    UncompressionDict dict(compression_dict_, compression_type_);
    UncompressionInfo info(context, dict, compression_type_);
    status_ = UncompressBlockContents(info, slice_.data(), block_size_,
                                      contents_, footer_.version(), ioptions_,
                                      memory_allocator_);
    // contents_ now holds uncompressed data.
    compression_type_ = kNoCompression;
  } else {
    GetBlockContents();
  }

  InsertUncompressedBlockToPersistentCacheIfNeeded();

  return status_;
}
|
|
|
|
|
|
|
|
} // namespace rocksdb
|