// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based_table_builder.h"

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

#include "db/dbformat.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/flush_block_policy.h"
#include "rocksdb/table.h"

#include "table/block.h"
#include "table/block_based_table_reader.h"
#include "table/block_builder.h"
#include "table/filter_block.h"
#include "table/block_based_filter_block.h"
#include "table/block_based_table_factory.h"
#include "table/full_filter_block.h"
#include "table/format.h"
#include "table/meta_blocks.h"
#include "table/table_builder.h"

#include "util/string_util.h"
#include "util/coding.h"
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/stop_watch.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTableOptions::IndexType IndexType;

// The interface for building index.
// Instructions for adding a new concrete IndexBuilder:
//  1. Create a subclass of IndexBuilder.
//  2. Add a new entry associated with that subclass in TableOptions::IndexType.
//  3. Add a create function for the new subclass in CreateIndexBuilder.
// Note: we could devise a more advanced design to simplify the process of
// adding a new subclass, but that would increase the code complexity and
// draw unwanted attention from readers. Given that we won't add/change
// indexes frequently, it makes sense to just embrace a straightforward
// design that just works.
class IndexBuilder {
 public:
  // Index builder will construct a set of blocks which contain:
  //  1. One primary index block.
  //  2. (Optional) a set of metablocks that contains the metadata of the
  //     primary index.
  struct IndexBlocks {
    Slice index_block_contents;
    std::unordered_map<std::string, Slice> meta_blocks;
  };
  explicit IndexBuilder(const Comparator* comparator)
      : comparator_(comparator) {}

  virtual ~IndexBuilder() {}

  // Add a new index entry to index block.
  // To allow further optimization, we provide `last_key_in_current_block` and
  // `first_key_in_next_block`, based on which the specific implementation can
  // determine the best index key to be used for the index block.
  // @last_key_in_current_block: this parameter may be overridden with the
  //                             value of a "substitute key".
  // @first_key_in_next_block: it will be nullptr if the entry being added is
  //                           the last one in the table
  //
  // REQUIRES: Finish() has not yet been called.
  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) = 0;

  // This method will be called whenever a key is added. The subclasses may
  // override OnKeyAdded() if they need to collect additional information.
  virtual void OnKeyAdded(const Slice& key) {}

  // Inform the index builder that all entries have been written. The builder
  // may therefore perform any operation required for block finalization.
  //
  // REQUIRES: Finish() has not yet been called.
  virtual Status Finish(IndexBlocks* index_blocks) = 0;

  // Get the estimated size for index block.
  virtual size_t EstimatedSize() const = 0;

 protected:
  const Comparator* comparator_;
};
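//
// Illustrative sketch (comment only, not compiled): how a caller is expected
// to drive a concrete IndexBuilder such as the ShortenedIndexBuilder defined
// below. The keys and the handle are made up for illustration.
//
//   ShortenedIndexBuilder builder(BytewiseComparator());
//   std::string last_key_block1 = "the quick brown fox";
//   Slice first_key_block2 = "the who";
//   BlockHandle handle1;  // handle of the first data block
//   builder.AddIndexEntry(&last_key_block1, &first_key_block2, handle1);
//   // ... one AddIndexEntry() call per flushed data block; for the final
//   // block, pass nullptr as first_key_in_next_block ...
//   IndexBuilder::IndexBlocks blocks;
//   Status s = builder.Finish(&blocks);  // blocks.index_block_contents is
//                                        // now ready to be written out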

// This index builder builds a space-efficient index block.
//
// Optimizations:
//  1. Made the block's `block_restart_interval` equal to 1, which avoids a
//     linear search when doing an index lookup.
//  2. Shorten the key length for the index block. Rather than using the last
//     key in the data block verbatim as the index key, we instead find the
//     shortest substitute key that serves the same function.
class ShortenedIndexBuilder : public IndexBuilder {
 public:
  explicit ShortenedIndexBuilder(const Comparator* comparator)
      : IndexBuilder(comparator),
        index_block_builder_(1 /* block_restart_interval == 1 */) {}

  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) override {
    if (first_key_in_next_block != nullptr) {
      comparator_->FindShortestSeparator(last_key_in_current_block,
                                         *first_key_in_next_block);
    } else {
      comparator_->FindShortSuccessor(last_key_in_current_block);
    }

    std::string handle_encoding;
    block_handle.EncodeTo(&handle_encoding);
    index_block_builder_.Add(*last_key_in_current_block, handle_encoding);
  }

  virtual Status Finish(IndexBlocks* index_blocks) override {
    index_blocks->index_block_contents = index_block_builder_.Finish();
    return Status::OK();
  }

  virtual size_t EstimatedSize() const override {
    return index_block_builder_.CurrentSizeEstimate();
  }

 private:
  BlockBuilder index_block_builder_;
};
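//
// Worked example (comment only): with the default bytewise comparator,
// FindShortestSeparator("abcdefg", "abzzzzz") can shorten the index key to
// "abd", which is >= every key in the finished block and < every key in the
// next block; for the final block, FindShortSuccessor("abcdefg") may yield
// "b". The exact outputs depend on the comparator in use.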

// HashIndexBuilder contains a binary-searchable primary index and the
// metadata for secondary hash index construction.
// The metadata for the hash index consists of two parts:
//  - a metablock that compactly contains a sequence of prefixes. All prefixes
//    are stored consecutively without any per-prefix metadata (such as prefix
//    sizes); that metadata is kept in the other metablock.
//  - a metablock that contains the metadata of the prefixes, including prefix
//    size, restart index and the number of blocks it spans. The format looks
//    like:
//
// +-----------------+---------------------------+---------------------+ <=prefix 1
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+ <=prefix 2
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+
// |                                                                   |
// | ....                                                              |
// |                                                                   |
// +-----------------+---------------------------+---------------------+ <=prefix n
// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
// +-----------------+---------------------------+---------------------+
//
// The reason for separating these two metablocks is to enable efficient reuse
// of the first metablock during hash index construction without unnecessary
// data copies or small heap allocations for prefixes.
class HashIndexBuilder : public IndexBuilder {
 public:
  explicit HashIndexBuilder(const Comparator* comparator,
                            const SliceTransform* hash_key_extractor)
      : IndexBuilder(comparator),
        primary_index_builder_(comparator),
        hash_key_extractor_(hash_key_extractor) {}

  virtual void AddIndexEntry(std::string* last_key_in_current_block,
                             const Slice* first_key_in_next_block,
                             const BlockHandle& block_handle) override {
    ++current_restart_index_;
    primary_index_builder_.AddIndexEntry(last_key_in_current_block,
                                         first_key_in_next_block, block_handle);
  }

  virtual void OnKeyAdded(const Slice& key) override {
    auto key_prefix = hash_key_extractor_->Transform(key);
    bool is_first_entry = pending_block_num_ == 0;

    // Keys may share the prefix
    if (is_first_entry || pending_entry_prefix_ != key_prefix) {
      if (!is_first_entry) {
        FlushPendingPrefix();
      }

      // Need a hard copy, otherwise the underlying data changes all the time.
      // TODO(kailiu) ToString() is expensive. We may be able to speed this up
      // by avoiding the data copy.
      pending_entry_prefix_ = key_prefix.ToString();
      pending_block_num_ = 1;
      pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
    } else {
      // The block count increments when keys sharing the prefix reside in
      // different data blocks.
      auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1;
      assert(last_restart_index <= current_restart_index_);
      if (last_restart_index != current_restart_index_) {
        ++pending_block_num_;
      }
    }
  }

  virtual Status Finish(IndexBlocks* index_blocks) override {
    FlushPendingPrefix();
    primary_index_builder_.Finish(index_blocks);
    index_blocks->meta_blocks.insert(
        {kHashIndexPrefixesBlock.c_str(), prefix_block_});
    index_blocks->meta_blocks.insert(
        {kHashIndexPrefixesMetadataBlock.c_str(), prefix_meta_block_});
    return Status::OK();
  }

  virtual size_t EstimatedSize() const override {
    return primary_index_builder_.EstimatedSize() + prefix_block_.size() +
           prefix_meta_block_.size();
  }

 private:
  void FlushPendingPrefix() {
    prefix_block_.append(pending_entry_prefix_.data(),
                         pending_entry_prefix_.size());
    PutVarint32(&prefix_meta_block_,
                static_cast<uint32_t>(pending_entry_prefix_.size()));
    PutVarint32(&prefix_meta_block_, pending_entry_index_);
    PutVarint32(&prefix_meta_block_, pending_block_num_);
  }

  ShortenedIndexBuilder primary_index_builder_;
  const SliceTransform* hash_key_extractor_;

  // stores a sequence of prefixes
  std::string prefix_block_;
  // stores the metadata of prefixes
  std::string prefix_meta_block_;

  // The following 3 variables keep the unflushed prefix and its metadata.
  // The details of block_num and entry_index can be found in
  // "block_hash_index.{h,cc}"
  uint32_t pending_block_num_ = 0;
  uint32_t pending_entry_index_ = 0;
  std::string pending_entry_prefix_;

  uint64_t current_restart_index_ = 0;
};
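//
// Worked example (comment only, keys hypothetical): with a fixed 3-byte
// prefix extractor, if keys with prefix "app" land in the 1st and 2nd data
// blocks and keys with prefix "bar" land in the 3rd, prefix_block_ becomes
// "appbar" and prefix_meta_block_ holds the varint32 triples (3, 0, 2) for
// "app" and (3, 2, 1) for "bar", that is: prefix length, 0-based index of
// the first data block containing the prefix, and number of blocks spanned.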

// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
namespace {

// Create an index builder based on its type.
IndexBuilder* CreateIndexBuilder(IndexType type, const Comparator* comparator,
                                 const SliceTransform* prefix_extractor) {
  switch (type) {
    case BlockBasedTableOptions::kBinarySearch: {
      return new ShortenedIndexBuilder(comparator);
    }
    case BlockBasedTableOptions::kHashSearch: {
      return new HashIndexBuilder(comparator, prefix_extractor);
    }
    default: {
      assert(!"Do not recognize the index type ");
      return nullptr;
    }
  }
  // impossible.
  assert(false);
  return nullptr;
}

// Create a filter block builder based on its type.
FilterBlockBuilder* CreateFilterBlockBuilder(const ImmutableCFOptions& opt,
    const BlockBasedTableOptions& table_opt) {
  if (table_opt.filter_policy == nullptr) return nullptr;

  FilterBitsBuilder* filter_bits_builder =
      table_opt.filter_policy->GetFilterBitsBuilder();
  if (filter_bits_builder == nullptr) {
    return new BlockBasedFilterBlockBuilder(opt.prefix_extractor, table_opt);
  } else {
    return new FullFilterBlockBuilder(opt.prefix_extractor,
                                      table_opt.whole_key_filtering,
                                      filter_bits_builder);
  }
}
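//
// Usage sketch (comment only): which branch is taken is determined by the
// configured FilterPolicy. For example, with the built-in Bloom filter,
// NewBloomFilterPolicy(10, /*use_block_based_builder=*/true) yields a
// nullptr FilterBitsBuilder and therefore a BlockBasedFilterBlockBuilder,
// while NewBloomFilterPolicy(10, false) provides a FilterBitsBuilder and
// therefore a FullFilterBlockBuilder.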

bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
  // Check whether the compressed output is more than 12.5% smaller than the
  // raw data.
  return compressed_size < raw_size - (raw_size / 8u);
}
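//
// Worked example (comment only): for a 4096-byte raw block the compressed
// result must be smaller than 4096 - 4096 / 8 = 3584 bytes, otherwise the
// block is kept uncompressed.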

// format_version is the block format as defined in include/rocksdb/table.h
Slice CompressBlock(const Slice& raw,
                    const CompressionOptions& compression_options,
                    CompressionType* type, uint32_t format_version,
                    std::string* compressed_output) {
  if (*type == kNoCompression) {
    return raw;
  }

  // Will return compressed block contents if (1) the compression method is
  // supported in this platform and (2) the compression rate is "good enough".
  switch (*type) {
    case kSnappyCompression:
      if (Snappy_Compress(compression_options, raw.data(), raw.size(),
                          compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    case kZlibCompression:
      if (Zlib_Compress(
              compression_options,
              GetCompressFormatForVersion(kZlibCompression, format_version),
              raw.data(), raw.size(), compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    case kBZip2Compression:
      if (BZip2_Compress(
              compression_options,
              GetCompressFormatForVersion(kBZip2Compression, format_version),
              raw.data(), raw.size(), compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    case kLZ4Compression:
      if (LZ4_Compress(
              compression_options,
              GetCompressFormatForVersion(kLZ4Compression, format_version),
              raw.data(), raw.size(), compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    case kLZ4HCCompression:
      if (LZ4HC_Compress(
              compression_options,
              GetCompressFormatForVersion(kLZ4HCCompression, format_version),
              raw.data(), raw.size(), compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    case kZSTDNotFinalCompression:
      if (ZSTD_Compress(compression_options, raw.data(), raw.size(),
                        compressed_output) &&
          GoodCompressionRatio(compressed_output->size(), raw.size())) {
        return *compressed_output;
      }
      break;  // fall back to no compression.
    default: {}  // Do not recognize this compression type
  }

  // Compression method is not supported, or the compression ratio is not good
  // enough, so just fall back to the uncompressed form.
  *type = kNoCompression;
  return raw;
}
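//
// Usage sketch (comment only): the caller passes the desired compression in
// *type and must check *type afterwards, since it is rewritten to
// kNoCompression whenever the returned Slice is the raw input.
//
//   CompressionType type = kSnappyCompression;
//   std::string scratch;
//   Slice out = CompressBlock(raw_block, CompressionOptions(), &type,
//                             /*format_version=*/2, &scratch);
//   // if type == kNoCompression here, `out` aliases `raw_block`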

}  // namespace

// kBlockBasedTableMagicNumber was picked by running
//    echo rocksdb.table.block_based | sha1sum
// and taking the leading 64 bits.
// Please note that kBlockBasedTableMagicNumber may also be accessed by other
// .cc files, so it is declared extern in the header; but to get the space
// allocated, it must be defined non-extern in exactly one place.
const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull;
// We also support reading and writing legacy block based table format (for
// backwards compatibility)
const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull;

// A collector that collects properties of interest to block-based table.
// For now this class looks heavy-weight since we only write one additional
// property.
// But in the foreseeable future, we will add more and more properties that are
// specific to block-based table.
class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
    : public IntTblPropCollector {
 public:
  explicit BlockBasedTablePropertiesCollector(
      BlockBasedTableOptions::IndexType index_type, bool whole_key_filtering,
      bool prefix_filtering)
      : index_type_(index_type),
        whole_key_filtering_(whole_key_filtering),
        prefix_filtering_(prefix_filtering) {}

  virtual Status InternalAdd(const Slice& key, const Slice& value,
                             uint64_t file_size) override {
    // Intentionally left blank. Have no interest in collecting stats for
    // individual key/value pairs.
    return Status::OK();
  }

  virtual Status Finish(UserCollectedProperties* properties) override {
    std::string val;
    PutFixed32(&val, static_cast<uint32_t>(index_type_));
    properties->insert({BlockBasedTablePropertyNames::kIndexType, val});
    properties->insert({BlockBasedTablePropertyNames::kWholeKeyFiltering,
                        whole_key_filtering_ ? kPropTrue : kPropFalse});
    properties->insert({BlockBasedTablePropertyNames::kPrefixFiltering,
                        prefix_filtering_ ? kPropTrue : kPropFalse});
    return Status::OK();
  }

  // The name of the properties collector can be used for debugging purposes.
  virtual const char* Name() const override {
    return "BlockBasedTablePropertiesCollector";
  }

  virtual UserCollectedProperties GetReadableProperties() const override {
    // Intentionally left blank.
    return UserCollectedProperties();
  }

 private:
  BlockBasedTableOptions::IndexType index_type_;
  bool whole_key_filtering_;
  bool prefix_filtering_;
};

struct BlockBasedTableBuilder::Rep {
  const ImmutableCFOptions ioptions;
  const BlockBasedTableOptions table_options;
  const InternalKeyComparator& internal_comparator;
  WritableFileWriter* file;
  uint64_t offset = 0;
  Status status;
  BlockBuilder data_block;

  InternalKeySliceTransform internal_prefix_transform;
  std::unique_ptr<IndexBuilder> index_builder;

  std::string last_key;
  const CompressionType compression_type;
  const CompressionOptions compression_opts;
  TableProperties props;

  bool closed = false;  // Either Finish() or Abandon() has been called.
  std::unique_ptr<FilterBlockBuilder> filter_block;
  char compressed_cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize];
  size_t compressed_cache_key_prefix_size;

  BlockHandle pending_handle;  // Handle to add to index block

  std::string compressed_output;
  std::unique_ptr<FlushBlockPolicy> flush_block_policy;

  std::vector<std::unique_ptr<IntTblPropCollector>> table_properties_collectors;

  Rep(const ImmutableCFOptions& _ioptions,
      const BlockBasedTableOptions& table_opt,
      const InternalKeyComparator& icomparator,
      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
          int_tbl_prop_collector_factories,
      WritableFileWriter* f, const CompressionType _compression_type,
      const CompressionOptions& _compression_opts, const bool skip_filters)
      : ioptions(_ioptions),
        table_options(table_opt),
        internal_comparator(icomparator),
        file(f),
        data_block(table_options.block_restart_interval),
        internal_prefix_transform(_ioptions.prefix_extractor),
        index_builder(CreateIndexBuilder(table_options.index_type,
                                         &internal_comparator,
                                         &this->internal_prefix_transform)),
        compression_type(_compression_type),
        compression_opts(_compression_opts),
        filter_block(skip_filters ? nullptr : CreateFilterBlockBuilder(
                                                  _ioptions, table_options)),
        flush_block_policy(
            table_options.flush_block_policy_factory->NewFlushBlockPolicy(
                table_options, data_block)) {
    for (auto& collector_factories : *int_tbl_prop_collector_factories) {
      table_properties_collectors.emplace_back(
          collector_factories->CreateIntTblPropCollector());
    }
    table_properties_collectors.emplace_back(
        new BlockBasedTablePropertiesCollector(
            table_options.index_type, table_options.whole_key_filtering,
            _ioptions.prefix_extractor != nullptr));
  }
};

BlockBasedTableBuilder::BlockBasedTableBuilder(
    const ImmutableCFOptions& ioptions,
    const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
        int_tbl_prop_collector_factories,
    WritableFileWriter* file, const CompressionType compression_type,
    const CompressionOptions& compression_opts, const bool skip_filters) {
  BlockBasedTableOptions sanitized_table_options(table_options);
  if (sanitized_table_options.format_version == 0 &&
      sanitized_table_options.checksum != kCRC32c) {
    Log(InfoLogLevel::WARN_LEVEL, ioptions.info_log,
        "Silently converting format_version to 1 because checksum is "
        "non-default");
    // silently convert format_version to 1 to keep consistent with current
    // behavior
    sanitized_table_options.format_version = 1;
  }

  rep_ = new Rep(ioptions, sanitized_table_options, internal_comparator,
                 int_tbl_prop_collector_factories, file, compression_type,
                 compression_opts, skip_filters);

  if (rep_->filter_block != nullptr) {
    rep_->filter_block->StartBlock(0);
  }
  if (table_options.block_cache_compressed.get() != nullptr) {
    BlockBasedTable::GenerateCachePrefix(
        table_options.block_cache_compressed.get(), file->writable_file(),
        &rep_->compressed_cache_key_prefix[0],
        &rep_->compressed_cache_key_prefix_size);
  }
}
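//
// Usage sketch (comment only, arguments elided): the builder is driven by
// code such as BuildTable(); keys must arrive in increasing internal-key
// order.
//
//   BlockBasedTableBuilder builder(/* ioptions, table_options, ... */);
//   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
//     builder.Add(iter->key(), iter->value());
//   }
//   Status s = builder.Finish();  // or builder.Abandon() on error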

BlockBasedTableBuilder::~BlockBasedTableBuilder() {
  assert(rep_->closed);  // Catch errors where caller forgot to call Finish()
  delete rep_;
}

void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
  Rep* r = rep_;
  assert(!r->closed);
  if (!ok()) return;
  if (r->props.num_entries > 0) {
    assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
  }

  auto should_flush = r->flush_block_policy->Update(key, value);
  if (should_flush) {
    assert(!r->data_block.empty());
    Flush();

    // Add item to index block.
    // We do not emit the index entry for a block until we have seen the
    // first key for the next data block.  This allows us to use shorter
    // keys in the index block.  For example, consider a block boundary
    // between the keys "the quick brown fox" and "the who".  We can use
    // "the r" as the key for the index block entry since it is >= all
    // entries in the first block and < all entries in subsequent
    // blocks.
    if (ok()) {
      r->index_builder->AddIndexEntry(&r->last_key, &key, r->pending_handle);
    }
  }

  if (r->filter_block != nullptr) {
    r->filter_block->Add(ExtractUserKey(key));
  }

  r->last_key.assign(key.data(), key.size());
  r->data_block.Add(key, value);
  r->props.num_entries++;
  r->props.raw_key_size += key.size();
  r->props.raw_value_size += value.size();

  r->index_builder->OnKeyAdded(key);
  NotifyCollectTableCollectorsOnAdd(key, value, r->offset,
                                    r->table_properties_collectors,
                                    r->ioptions.info_log);
}

void BlockBasedTableBuilder::Flush() {
  Rep* r = rep_;
  assert(!r->closed);
  if (!ok()) return;
  if (r->data_block.empty()) return;
  WriteBlock(&r->data_block, &r->pending_handle);
  if (ok()) {
    r->status = r->file->Flush();
  }
  if (r->filter_block != nullptr) {
    r->filter_block->StartBlock(r->offset);
  }
  r->props.data_size = r->offset;
  ++r->props.num_data_blocks;
}

void BlockBasedTableBuilder::WriteBlock(BlockBuilder* block,
                                        BlockHandle* handle) {
  WriteBlock(block->Finish(), handle);
  block->Reset();
}

void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
                                        BlockHandle* handle) {
  // File format contains a sequence of blocks where each block has:
  //    block_data: uint8[n]
  //    type: uint8
  //    crc: uint32
  assert(ok());
  Rep* r = rep_;

  auto type = r->compression_type;
  Slice block_contents;
  if (raw_block_contents.size() < kCompressionSizeLimit) {
    block_contents =
        CompressBlock(raw_block_contents, r->compression_opts, &type,
                      r->table_options.format_version, &r->compressed_output);
  } else {
    RecordTick(r->ioptions.statistics, NUMBER_BLOCK_NOT_COMPRESSED);
    type = kNoCompression;
    block_contents = raw_block_contents;
  }
  WriteRawBlock(block_contents, type, handle);
  r->compressed_output.clear();
}

void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
                                           CompressionType type,
                                           BlockHandle* handle) {
  Rep* r = rep_;
  StopWatch sw(r->ioptions.env, r->ioptions.statistics, WRITE_RAW_BLOCK_MICROS);
  handle->set_offset(r->offset);
  handle->set_size(block_contents.size());
  r->status = r->file->Append(block_contents);
  if (r->status.ok()) {
    char trailer[kBlockTrailerSize];
    trailer[0] = type;
    char* trailer_without_type = trailer + 1;
    switch (r->table_options.checksum) {
      case kNoChecksum:
        // we don't support no checksum yet
        assert(false);
        // intentional fallthrough in release binary
      case kCRC32c: {
        auto crc = crc32c::Value(block_contents.data(), block_contents.size());
        crc = crc32c::Extend(crc, trailer, 1);  // Extend to cover block type
        EncodeFixed32(trailer_without_type, crc32c::Mask(crc));
        break;
      }
      case kxxHash: {
        void* xxh = XXH32_init(0);
        XXH32_update(xxh, block_contents.data(),
                     static_cast<uint32_t>(block_contents.size()));
        XXH32_update(xxh, trailer, 1);  // Extend to cover block type
        EncodeFixed32(trailer_without_type, XXH32_digest(xxh));
        break;
      }
    }

    r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
    if (r->status.ok()) {
      r->status = InsertBlockInCache(block_contents, type, handle);
    }
    if (r->status.ok()) {
      r->offset += block_contents.size() + kBlockTrailerSize;
    }
  }
}
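//
// On-disk layout written by WriteRawBlock (comment only): for a block whose
// (possibly compressed) contents are N bytes, the file gains
// N + kBlockTrailerSize bytes: [contents: N][type: 1 byte][checksum: 4 bytes],
// and the BlockHandle records (offset, N), excluding the trailer.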

Status BlockBasedTableBuilder::status() const {
  return rep_->status;
}

static void DeleteCachedBlock(const Slice& key, void* value) {
  Block* block = reinterpret_cast<Block*>(value);
  delete block;
}

//
// Make a copy of the block contents and insert into compressed block cache
//
Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
                                                  const CompressionType type,
                                                  const BlockHandle* handle) {
  Rep* r = rep_;
  Cache* block_cache_compressed = r->table_options.block_cache_compressed.get();

  if (type != kNoCompression && block_cache_compressed != nullptr) {

    Cache::Handle* cache_handle = nullptr;
    size_t size = block_contents.size();

    std::unique_ptr<char[]> ubuf(new char[size + 1]);
    memcpy(ubuf.get(), block_contents.data(), size);
    ubuf[size] = type;

    BlockContents results(std::move(ubuf), size, true, type);

    Block* block = new Block(std::move(results));

    // make cache key by appending the file offset to the cache prefix id
    char* end = EncodeVarint64(
                  r->compressed_cache_key_prefix +
                  r->compressed_cache_key_prefix_size,
                  handle->offset());
    Slice key(r->compressed_cache_key_prefix, static_cast<size_t>
              (end - r->compressed_cache_key_prefix));

    // Insert into compressed block cache.
    cache_handle = block_cache_compressed->Insert(
        key, block, block->usable_size(), &DeleteCachedBlock);
    block_cache_compressed->Release(cache_handle);

    // Invalidate OS cache.
    r->file->InvalidateCache(static_cast<size_t>(r->offset), size);
  }
  return Status::OK();
}
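//
// Cache key layout (comment only): the key is the per-file prefix generated
// by BlockBasedTable::GenerateCachePrefix() followed by the block's file
// offset encoded as a varint64, so a prefix of P bytes and an offset below
// 128 produce a key of P + 1 bytes. The cached value is the block contents
// plus one trailing byte holding the compression type.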
|
|
|
|
|
2013-10-29 00:54:09 +00:00
|
|
|
Status BlockBasedTableBuilder::Finish() {
|
2011-03-18 22:37:00 +00:00
|
|
|
Rep* r = rep_;
|
2013-11-08 05:27:21 +00:00
|
|
|
bool empty_data_block = r->data_block.empty();
|
2011-03-18 22:37:00 +00:00
|
|
|
Flush();
|
|
|
|
assert(!r->closed);
|
|
|
|
r->closed = true;
|
2012-04-17 15:36:46 +00:00
|
|
|
|
2014-09-08 17:37:05 +00:00
|
|
|
BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;
|
2012-04-17 15:36:46 +00:00
|
|
|
// Write filter block
|
2013-03-01 02:04:58 +00:00
|
|
|
if (ok() && r->filter_block != nullptr) {
|
2013-11-20 00:29:42 +00:00
|
|
|
auto filter_contents = r->filter_block->Finish();
|
|
|
|
r->props.filter_size = filter_contents.size();
|
|
|
|
WriteRawBlock(filter_contents, kNoCompression, &filter_block_handle);
|
2012-04-17 15:36:46 +00:00
|
|
|
}
|
|
|
|
|
2013-11-20 00:29:42 +00:00
|
|
|
// To make sure properties block is able to keep the accurate size of index
|
2013-10-10 18:43:24 +00:00
|
|
|
// block, we will finish writing all index entries here and flush them
|
|
|
|
// to storage after metaindex block is written.
|
2013-11-08 05:27:21 +00:00
|
|
|
if (ok() && !empty_data_block) {
|
2014-05-15 21:09:03 +00:00
|
|
|
r->index_builder->AddIndexEntry(
|
|
|
|
&r->last_key, nullptr /* no next data block */, r->pending_handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
IndexBuilder::IndexBlocks index_blocks;
|
|
|
|
auto s = r->index_builder->Finish(&index_blocks);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
2013-10-10 18:43:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Write meta blocks and metaindex block with the following order.
|
|
|
|
// 1. [meta block: filter]
|
2014-05-15 21:09:03 +00:00
|
|
|
// 2. [other meta blocks]
|
|
|
|
// 3. [meta block: properties]
|
|
|
|
// 4. [metaindex block]
|
|
|
|
// write meta blocks
|
|
|
|
  MetaIndexBuilder meta_index_builder;
  for (const auto& item : index_blocks.meta_blocks) {
    BlockHandle block_handle;
    WriteBlock(item.second, &block_handle);
    meta_index_builder.Add(item.first, block_handle);
  }

  if (ok()) {
    if (r->filter_block != nullptr) {
      // Add a mapping from "<filter_block_prefix><filter_policy_name>" to the
      // location of the filter data.
      std::string key;
      if (r->filter_block->IsBlockBased()) {
        key = BlockBasedTable::kFilterBlockPrefix;
      } else {
        key = BlockBasedTable::kFullFilterBlockPrefix;
      }
      key.append(r->table_options.filter_policy->Name());
      meta_index_builder.Add(key, filter_block_handle);
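      // With the built-in bloom filter, for example, the key added above is
      // "filter.rocksdb.BuiltinBloomFilter" (block-based filter) or
      // "fullfilter.rocksdb.BuiltinBloomFilter" (full filter); custom policies
      // register under whatever their Name() returns.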
    }

    // Write properties block.
    {
      PropertyBlockBuilder property_block_builder;
      r->props.filter_policy_name = r->table_options.filter_policy != nullptr ?
          r->table_options.filter_policy->Name() : "";
      r->props.index_size =
          r->index_builder->EstimatedSize() + kBlockTrailerSize;
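      // kBlockTrailerSize accounts for the per-block trailer that
      // WriteRawBlock appends (a 1-byte compression type plus a 4-byte
      // checksum), so the recorded index size matches what ends up on disk.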

      // Add basic properties
      property_block_builder.AddTableProperty(r->props);

      // Add user-collected properties
      NotifyCollectTableCollectorsOnFinish(r->table_properties_collectors,
                                           r->ioptions.info_log,
                                           &property_block_builder);

      BlockHandle properties_block_handle;
      WriteRawBlock(
          property_block_builder.Finish(),
          kNoCompression,
          &properties_block_handle
      );

      meta_index_builder.Add(kPropertiesBlock, properties_block_handle);
    }  // end of properties block writing
  }  // meta blocks

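  // Note the asymmetry below: the metaindex block goes through WriteRawBlock
  // and is therefore never compressed, while the index block goes through
  // WriteBlock and is subject to the configured compression, just like the
  // data blocks.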
  // Write index block
  if (ok()) {
    // flush the metaindex block
    WriteRawBlock(meta_index_builder.Finish(), kNoCompression,
                  &metaindex_block_handle);
    WriteBlock(index_blocks.index_block_contents, &index_block_handle);
  }

  // Write footer
  if (ok()) {
    // No need to write out the new footer format if we're using the default
    // checksum. We write the legacy magic number so that old versions of
    // RocksDB can read files generated with a new release (just in case
    // somebody wants to roll back after an upgrade).
    // TODO(icanadi) at some point in the future, when we're absolutely sure
    // nobody will roll back to RocksDB 2.x versions, retire the legacy magic
    // number and always write new table files with the new magic number.
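    // Rough footer layout for reference (table/format.cc holds the
    // authoritative encoding): the legacy format (format_version == 0) is the
    // metaindex handle, the index handle, zero padding and the 8-byte legacy
    // magic number; newer versions prepend a 1-byte checksum type and append
    // a 4-byte format version before the magic number.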
    bool legacy = (r->table_options.format_version == 0);
    // this is guaranteed by BlockBasedTableBuilder's constructor
    assert(r->table_options.checksum == kCRC32c ||
           r->table_options.format_version != 0);
    Footer footer(legacy ? kLegacyBlockBasedTableMagicNumber
                         : kBlockBasedTableMagicNumber,
                  r->table_options.format_version);
    footer.set_metaindex_handle(metaindex_block_handle);
    footer.set_index_handle(index_block_handle);
    footer.set_checksum(r->table_options.checksum);
    std::string footer_encoding;
    footer.EncodeTo(&footer_encoding);
    r->status = r->file->Append(footer_encoding);
    if (r->status.ok()) {
      r->offset += footer_encoding.size();
    }
  }

  return r->status;
}

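// Abandon() is the counterpart of Finish(): callers use it when they give up
// on the file (for example after an error), so the builder can be destroyed
// without writing the table's tail. Exactly one of Finish() or Abandon() is
// expected to be called before the builder is destroyed.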
void BlockBasedTableBuilder::Abandon() {
  Rep* r = rep_;
  assert(!r->closed);
  r->closed = true;
}

uint64_t BlockBasedTableBuilder::NumEntries() const {
  return rep_->props.num_entries;
}

uint64_t BlockBasedTableBuilder::FileSize() const {
  return rep_->offset;
}

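// The output file is flagged as needing compaction if any property collector
// asks for it; callers that install the file can use this hint (for example
// to schedule the newly written file for compaction shortly after creation).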
bool BlockBasedTableBuilder::NeedCompact() const {
  for (const auto& collector : rep_->table_properties_collectors) {
    if (collector->NeedCompact()) {
      return true;
    }
  }
  return false;
}

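// Returns a snapshot of the properties accumulated so far: the basic
// properties maintained by the builder itself, merged with the readable
// properties reported by each user-supplied collector.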
TableProperties BlockBasedTableBuilder::GetTableProperties() const {
  TableProperties ret = rep_->props;
  for (const auto& collector : rep_->table_properties_collectors) {
    for (const auto& prop : collector->GetReadableProperties()) {
      ret.user_collected_properties.insert(prop);
    }
  }
  return ret;
}

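// These prefixes are combined with FilterPolicy::Name() in Finish() above to
// form the metaindex keys under which the block-based and full filter blocks
// are registered.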
const std::string BlockBasedTable::kFilterBlockPrefix = "filter.";
const std::string BlockBasedTable::kFullFilterBlockPrefix = "fullfilter.";

}  // namespace rocksdb