//  Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/format.h"

#include <string>
#include <inttypes.h>

#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "rocksdb/env.h"
#include "table/block.h"
#include "table/block_based_table_reader.h"
#include "table/persistent_cache_helper.h"
#include "util/coding.h"
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/logging.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const uint64_t kLegacyBlockBasedTableMagicNumber;
extern const uint64_t kBlockBasedTableMagicNumber;

#ifndef ROCKSDB_LITE
extern const uint64_t kLegacyPlainTableMagicNumber;
extern const uint64_t kPlainTableMagicNumber;
#else
// ROCKSDB_LITE doesn't have plain table
const uint64_t kLegacyPlainTableMagicNumber = 0;
const uint64_t kPlainTableMagicNumber = 0;
#endif
const uint32_t DefaultStackBufferSize = 5000;
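
// Detailed timing (e.g. the decompression time histograms below) is only
// collected when both an Env and a Statistics object are available and the
// stats level is above kExceptDetailedTimers.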
bool ShouldReportDetailedTime(Env* env, Statistics* stats) {
  return env != nullptr && stats != nullptr &&
         stats->stats_level_ > kExceptDetailedTimers;
}

void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~static_cast<uint64_t>(0));
  assert(size_ != ~static_cast<uint64_t>(0));
  PutVarint64Varint64(dst, offset_, size_);
}
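
// Consumes a varint64 offset and a varint64 size from the front of *input,
// advancing the slice past the encoded handle.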
Status BlockHandle::DecodeFrom(Slice* input) {
  if (GetVarint64(input, &offset_) &&
      GetVarint64(input, &size_)) {
    return Status::OK();
  } else {
    // reset in case of failure after partial decoding
    offset_ = 0;
    size_ = 0;
    return Status::Corruption("bad block handle");
  }
}

// Return a string that contains a copy of the handle.
std::string BlockHandle::ToString(bool hex) const {
  std::string handle_str;
  EncodeTo(&handle_str);
  if (hex) {
    return Slice(handle_str).ToString(true);
  } else {
    return handle_str;
  }
}

const BlockHandle BlockHandle::kNullBlockHandle(0, 0);

namespace {
inline bool IsLegacyFooterFormat(uint64_t magic_number) {
  return magic_number == kLegacyBlockBasedTableMagicNumber ||
         magic_number == kLegacyPlainTableMagicNumber;
}
inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
  if (magic_number == kLegacyBlockBasedTableMagicNumber) {
    return kBlockBasedTableMagicNumber;
  }
  if (magic_number == kLegacyPlainTableMagicNumber) {
    return kPlainTableMagicNumber;
  }
  assert(false);
  return 0;
}
}  // namespace

// legacy footer format:
//    metaindex handle (varint64 offset, varint64 size)
//    index handle     (varint64 offset, varint64 size)
//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength
//    table_magic_number (8 bytes)
// new footer format:
//    checksum type (char, 1 byte)
//    metaindex handle (varint64 offset, varint64 size)
//    index handle     (varint64 offset, varint64 size)
//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength + 1
//    footer version (4 bytes)
//    table_magic_number (8 bytes)
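// From the encoding code below, the two layouts work out to fixed sizes:
//    kVersion0EncodedLength    = 2 * BlockHandle::kMaxEncodedLength + 8
//    kNewVersionsEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 1 + 4 + 8
// where BlockHandle::kMaxEncodedLength is two maximal varint64s (10 bytes each).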
void Footer::EncodeTo(std::string* dst) const {
  assert(HasInitializedTableMagicNumber());
  if (IsLegacyFooterFormat(table_magic_number())) {
    // has to be default checksum with legacy footer
    assert(checksum_ == kCRC32c);
    const size_t original_size = dst->size();
    metaindex_handle_.EncodeTo(dst);
    index_handle_.EncodeTo(dst);
    dst->resize(original_size + 2 * BlockHandle::kMaxEncodedLength);  // Padding
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
    assert(dst->size() == original_size + kVersion0EncodedLength);
  } else {
    const size_t original_size = dst->size();
    dst->push_back(static_cast<char>(checksum_));
    metaindex_handle_.EncodeTo(dst);
    index_handle_.EncodeTo(dst);
    dst->resize(original_size + kNewVersionsEncodedLength - 12);  // Padding
    PutFixed32(dst, version());
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
    assert(dst->size() == original_size + kNewVersionsEncodedLength);
  }
}

Footer::Footer(uint64_t _table_magic_number, uint32_t _version)
    : version_(_version),
      checksum_(kCRC32c),
      table_magic_number_(_table_magic_number) {
  // This should be guaranteed by constructor callers
  assert(!IsLegacyFooterFormat(_table_magic_number) || version_ == 0);
}
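
// Decoding works backwards from the end of the input slice: the 8-byte magic
// number (stored as two fixed32 halves, low 32 bits first) is examined to
// decide between the legacy and the versioned footer format.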
Status Footer::DecodeFrom(Slice* input) {
  assert(!HasInitializedTableMagicNumber());
  assert(input != nullptr);
  assert(input->size() >= kMinEncodedLength);

  const char* magic_ptr =
      input->data() + input->size() - kMagicNumberLengthByte;
  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
  uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
                    (static_cast<uint64_t>(magic_lo)));

  // We check for legacy formats here and silently upconvert them
  bool legacy = IsLegacyFooterFormat(magic);
  if (legacy) {
    magic = UpconvertLegacyFooterFormat(magic);
  }
  set_table_magic_number(magic);

  if (legacy) {
    // The size is already asserted to be at least kMinEncodedLength
    // at the beginning of the function
    input->remove_prefix(input->size() - kVersion0EncodedLength);
    version_ = 0 /* legacy */;
    checksum_ = kCRC32c;
  } else {
    version_ = DecodeFixed32(magic_ptr - 4);
    // Footer version 1 and higher will always occupy exactly this many bytes.
    // It consists of the checksum type, two block handles, padding,
    // a version number, and a magic number
    if (input->size() < kNewVersionsEncodedLength) {
      return Status::Corruption("input is too short to be an sstable");
    } else {
      input->remove_prefix(input->size() - kNewVersionsEncodedLength);
    }
    uint32_t chksum;
    if (!GetVarint32(input, &chksum)) {
      return Status::Corruption("bad checksum type");
    }
    checksum_ = static_cast<ChecksumType>(chksum);
  }

  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // We skip over any leftover data (just padding for now) in "input"
    const char* end = magic_ptr + kMagicNumberLengthByte;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}

std::string Footer::ToString() const {
  std::string result;
  result.reserve(1024);

  bool legacy = IsLegacyFooterFormat(table_magic_number_);
  if (legacy) {
    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n ");
    result.append("index handle: " + index_handle_.ToString() + "\n ");
    result.append("table_magic_number: " +
                  rocksdb::ToString(table_magic_number_) + "\n ");
  } else {
    result.append("checksum: " + rocksdb::ToString(checksum_) + "\n ");
    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n ");
    result.append("index handle: " + index_handle_.ToString() + "\n ");
    result.append("footer version: " + rocksdb::ToString(version_) + "\n ");
    result.append("table_magic_number: " +
                  rocksdb::ToString(table_magic_number_) + "\n ");
  }
  return result;
}
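
// Reads the footer from the last Footer::kMaxEncodedLength bytes of the file
// (via the optional prefetch buffer when possible), decodes it, and optionally
// enforces an expected table magic number.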
Status ReadFooterFromFile(RandomAccessFileReader* file,
                          FilePrefetchBuffer* prefetch_buffer,
                          uint64_t file_size, Footer* footer,
                          uint64_t enforce_table_magic_number) {
  if (file_size < Footer::kMinEncodedLength) {
    return Status::Corruption(
        "file is too short (" + ToString(file_size) + " bytes) to be an "
        "sstable: " + file->file_name());
  }

  char footer_space[Footer::kMaxEncodedLength];
  Slice footer_input;
  size_t read_offset =
      (file_size > Footer::kMaxEncodedLength)
          ? static_cast<size_t>(file_size - Footer::kMaxEncodedLength)
          : 0;
  Status s;
  if (prefetch_buffer == nullptr ||
      !prefetch_buffer->TryReadFromCache(read_offset, Footer::kMaxEncodedLength,
                                         &footer_input)) {
    s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input,
                   footer_space);
    if (!s.ok()) return s;
  }

  // Check that we actually read the whole footer from the file. It may be
  // that the size we were given isn't correct.
  if (footer_input.size() < Footer::kMinEncodedLength) {
    return Status::Corruption(
        "file is too short (" + ToString(file_size) + " bytes) to be an "
        "sstable: " + file->file_name());
  }

  s = footer->DecodeFrom(&footer_input);
  if (!s.ok()) {
    return s;
  }
  if (enforce_table_magic_number != 0 &&
      enforce_table_magic_number != footer->table_magic_number()) {
    return Status::Corruption(
        "Bad table magic number: expected " +
        ToString(enforce_table_magic_number) + ", found " +
        ToString(footer->table_magic_number()) + " in " + file->file_name());
  }
  return Status::OK();
}

// Without an anonymous namespace here, we fail the -Wmissing-prototypes
// warning.
namespace {
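// Each block on disk is followed by a kBlockTrailerSize (5 byte) trailer:
// 1 byte of compression type plus a 4-byte checksum. The checksum covers the
// block contents and the compression-type byte, i.e. block_size + 1 bytes.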
Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer,
                          const Slice& contents, size_t block_size,
                          RandomAccessFileReader* file,
                          const BlockHandle& handle) {
  Status s;
  // Check the crc of the type and the block contents
  if (options.verify_checksums) {
    const char* data = contents.data();  // Pointer to where Read put the data
    PERF_TIMER_GUARD(block_checksum_time);
    uint32_t value = DecodeFixed32(data + block_size + 1);
    uint32_t actual = 0;
    switch (footer.checksum()) {
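      // Stored CRC32c values are masked (see util/crc32c.h); Unmask() undoes
      // that before the comparison against the freshly computed value.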
      case kCRC32c:
        value = crc32c::Unmask(value);
        actual = crc32c::Value(data, block_size + 1);
        break;
      case kxxHash:
        actual = XXH32(data, static_cast<int>(block_size) + 1, 0);
        break;
      default:
        s = Status::Corruption(
            "unknown checksum type " + ToString(footer.checksum()) + " in " +
            file->file_name() + " offset " + ToString(handle.offset()) +
            " size " + ToString(block_size));
    }
    if (s.ok() && actual != value) {
      s = Status::Corruption(
          "block checksum mismatch: expected " + ToString(actual) + ", got " +
          ToString(value) + " in " + file->file_name() + " offset " +
          ToString(handle.offset()) + " size " + ToString(block_size));
    }
    if (!s.ok()) {
      return s;
    }
  }
  return s;
}

// Read a block and check its CRC
// contents is the result of reading.
// According to the implementation of file->Read, contents may not point to buf
Status ReadBlock(RandomAccessFileReader* file, const Footer& footer,
                 const ReadOptions& options, const BlockHandle& handle,
                 Slice* contents, /* result of reading */ char* buf) {
  size_t n = static_cast<size_t>(handle.size());
  Status s;

  {
    PERF_TIMER_GUARD(block_read_time);
    s = file->Read(handle.offset(), n + kBlockTrailerSize, contents, buf);
  }

  PERF_COUNTER_ADD(block_read_count, 1);
  PERF_COUNTER_ADD(block_read_byte, n + kBlockTrailerSize);

  if (!s.ok()) {
    return s;
  }
  if (contents->size() != n + kBlockTrailerSize) {
    return Status::Corruption("truncated block read from " + file->file_name() +
                              " offset " + ToString(handle.offset()) +
                              ", expected " + ToString(n + kBlockTrailerSize) +
                              " bytes, got " + ToString(contents->size()));
  }
  return CheckBlockChecksum(options, footer, *contents, n, file, handle);
}

}  // namespace
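
// Reads the block identified by "handle" into *contents. Lookup order below:
// uncompressed persistent cache, prefetch buffer, compressed persistent cache,
// and finally the file itself; the payload is then decompressed if that was
// requested and the block is actually compressed.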
Status ReadBlockContents(RandomAccessFileReader* file,
                         FilePrefetchBuffer* prefetch_buffer,
                         const Footer& footer, const ReadOptions& read_options,
                         const BlockHandle& handle, BlockContents* contents,
                         const ImmutableCFOptions& ioptions,
                         bool decompression_requested,
                         const Slice& compression_dict,
                         const PersistentCacheOptions& cache_options) {
  Status status;
  Slice slice;
  size_t n = static_cast<size_t>(handle.size());
  std::unique_ptr<char[]> heap_buf;
  char stack_buf[DefaultStackBufferSize];
  char* used_buf = nullptr;
  rocksdb::CompressionType compression_type;

  if (cache_options.persistent_cache &&
      !cache_options.persistent_cache->IsCompressed()) {
    status = PersistentCacheHelper::LookupUncompressedPage(cache_options,
                                                           handle, contents);
    if (status.ok()) {
      // uncompressed page is found for the block handle
      return status;
    } else {
      // uncompressed page is not found
      if (ioptions.info_log && !status.IsNotFound()) {
        assert(!status.ok());
        ROCKS_LOG_INFO(ioptions.info_log,
                       "Error reading from persistent cache. %s",
                       status.ToString().c_str());
      }
    }
  }

  bool got_from_prefetch_buffer = false;
  if (prefetch_buffer != nullptr &&
      prefetch_buffer->TryReadFromCache(
          handle.offset(),
          static_cast<size_t>(handle.size()) + kBlockTrailerSize, &slice)) {
    status =
        CheckBlockChecksum(read_options, footer, slice,
                           static_cast<size_t>(handle.size()), file, handle);
    if (!status.ok()) {
      return status;
    }
    got_from_prefetch_buffer = true;
    used_buf = const_cast<char*>(slice.data());
  } else if (cache_options.persistent_cache &&
             cache_options.persistent_cache->IsCompressed()) {
    // lookup the raw (still compressed) page in the compressed persistent cache
    status = PersistentCacheHelper::LookupRawPage(
        cache_options, handle, &heap_buf, n + kBlockTrailerSize);
  } else {
    status = Status::NotFound();
  }

  if (!got_from_prefetch_buffer) {
    if (status.ok()) {
      // cache hit
      used_buf = heap_buf.get();
      slice = Slice(heap_buf.get(), n);
    } else {
      if (ioptions.info_log && !status.IsNotFound()) {
        assert(!status.ok());
        ROCKS_LOG_INFO(ioptions.info_log,
                       "Error reading from persistent cache. %s",
                       status.ToString().c_str());
      }
      // cache miss; read from device
      if (decompression_requested &&
          n + kBlockTrailerSize < DefaultStackBufferSize) {
        // If we've got a small enough hunk of data, read it into the
        // trivially allocated stack buffer instead of needing a full malloc()
        used_buf = &stack_buf[0];
      } else {
        heap_buf = std::unique_ptr<char[]>(new char[n + kBlockTrailerSize]);
        used_buf = heap_buf.get();
      }

      status = ReadBlock(file, footer, read_options, handle, &slice, used_buf);
      if (status.ok() && read_options.fill_cache &&
          cache_options.persistent_cache &&
          cache_options.persistent_cache->IsCompressed()) {
        // insert to raw cache
        PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf,
                                             n + kBlockTrailerSize);
      }
    }

    if (!status.ok()) {
      return status;
    }
  }

  PERF_TIMER_GUARD(block_decompress_time);
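
  // The compression-type byte sits right after the n bytes of block data in
  // the block trailer; kNoCompression means the payload can be used as-is.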
  compression_type = static_cast<rocksdb::CompressionType>(slice.data()[n]);

  if (decompression_requested && compression_type != kNoCompression) {
    // compressed page: uncompress and update the uncompressed cache below
    status = UncompressBlockContents(slice.data(), n, contents,
                                     footer.version(), compression_dict,
                                     ioptions);
  } else if (slice.data() != used_buf) {
    // the slice content is not in the buffer we provided
    *contents = BlockContents(Slice(slice.data(), n), false, compression_type);
  } else {
    // page is uncompressed; the buffer is either the stack or the heap one
    if (got_from_prefetch_buffer || used_buf == &stack_buf[0]) {
      heap_buf = std::unique_ptr<char[]>(new char[n]);
      memcpy(heap_buf.get(), used_buf, n);
    }
    *contents = BlockContents(std::move(heap_buf), n, true, compression_type);
  }

  if (status.ok() && !got_from_prefetch_buffer && read_options.fill_cache &&
      cache_options.persistent_cache &&
      !cache_options.persistent_cache->IsCompressed()) {
    // insert to uncompressed cache
    PersistentCacheHelper::InsertUncompressedPage(cache_options, handle,
                                                  *contents);
  }

  return status;
}
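
// Decompresses the n bytes at "data" into *contents using compression_type.
// compression_dict is only consulted by the codecs in the switch below that
// support dictionary compression (zlib, LZ4, LZ4HC and ZSTD).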
Status UncompressBlockContentsForCompressionType(
    const char* data, size_t n, BlockContents* contents,
    uint32_t format_version, const Slice& compression_dict,
    CompressionType compression_type, const ImmutableCFOptions& ioptions) {
  std::unique_ptr<char[]> ubuf;

  assert(compression_type != kNoCompression && "Invalid compression type");

  StopWatchNano timer(ioptions.env,
                      ShouldReportDetailedTime(ioptions.env, ioptions.statistics));
  int decompress_size = 0;
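  // format_version (taken from the table footer) selects the on-disk encoding
  // of the compressed payload via GetCompressFormatForVersion().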
  switch (compression_type) {
    case kSnappyCompression: {
      size_t ulength = 0;
      static char snappy_corrupt_msg[] =
          "Snappy not supported or corrupted Snappy compressed block contents";
      if (!Snappy_GetUncompressedLength(data, n, &ulength)) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      ubuf.reset(new char[ulength]);
      if (!Snappy_Uncompress(data, n, ubuf.get())) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      *contents = BlockContents(std::move(ubuf), ulength, true, kNoCompression);
      break;
    }
    case kZlibCompression:
      ubuf.reset(Zlib_Uncompress(
          data, n, &decompress_size,
          GetCompressFormatForVersion(kZlibCompression, format_version),
          compression_dict));
      if (!ubuf) {
        static char zlib_corrupt_msg[] =
            "Zlib not supported or corrupted Zlib compressed block contents";
        return Status::Corruption(zlib_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    case kBZip2Compression:
      ubuf.reset(BZip2_Uncompress(
          data, n, &decompress_size,
          GetCompressFormatForVersion(kBZip2Compression, format_version)));
      if (!ubuf) {
        static char bzip2_corrupt_msg[] =
            "Bzip2 not supported or corrupted Bzip2 compressed block contents";
        return Status::Corruption(bzip2_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    case kLZ4Compression:
      ubuf.reset(LZ4_Uncompress(
          data, n, &decompress_size,
          GetCompressFormatForVersion(kLZ4Compression, format_version),
          compression_dict));
      if (!ubuf) {
        static char lz4_corrupt_msg[] =
            "LZ4 not supported or corrupted LZ4 compressed block contents";
        return Status::Corruption(lz4_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    case kLZ4HCCompression:
      ubuf.reset(LZ4_Uncompress(
          data, n, &decompress_size,
          GetCompressFormatForVersion(kLZ4HCCompression, format_version),
          compression_dict));
      if (!ubuf) {
        static char lz4hc_corrupt_msg[] =
            "LZ4HC not supported or corrupted LZ4HC compressed block contents";
        return Status::Corruption(lz4hc_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    case kXpressCompression:
      ubuf.reset(XPRESS_Uncompress(data, n, &decompress_size));
      if (!ubuf) {
        static char xpress_corrupt_msg[] =
            "XPRESS not supported or corrupted XPRESS compressed block contents";
        return Status::Corruption(xpress_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    case kZSTD:
    case kZSTDNotFinalCompression:
      ubuf.reset(ZSTD_Uncompress(data, n, &decompress_size, compression_dict));
      if (!ubuf) {
        static char zstd_corrupt_msg[] =
            "ZSTD not supported or corrupted ZSTD compressed block contents";
        return Status::Corruption(zstd_corrupt_msg);
      }
      *contents =
          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
      break;
    default:
      return Status::Corruption("bad block type");
  }

  if (ShouldReportDetailedTime(ioptions.env, ioptions.statistics)) {
    MeasureTime(ioptions.statistics, DECOMPRESSION_TIMES_NANOS,
                timer.ElapsedNanos());
    MeasureTime(ioptions.statistics, BYTES_DECOMPRESSED,
                contents->data.size());
    RecordTick(ioptions.statistics, NUMBER_BLOCK_DECOMPRESSED);
  }

  return Status::OK();
}

//
// The 'data' points to the raw block contents that were read from the file.
// This method allocates a new heap buffer and uncompresses the raw block
// contents into it. The buffer is returned via 'contents' and it is up to the
// caller to free it.
// format_version is the block format as defined in include/rocksdb/table.h
Status UncompressBlockContents(const char* data, size_t n,
                               BlockContents* contents, uint32_t format_version,
                               const Slice& compression_dict,
                               const ImmutableCFOptions& ioptions) {
  assert(data[n] != kNoCompression);
  return UncompressBlockContentsForCompressionType(
      data, n, contents, format_version, compression_dict,
      (CompressionType)data[n], ioptions);
}

}  // namespace rocksdb