2013-10-16 21:59:46 +00:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "db/version_set.h"
|
2014-02-26 22:16:23 +00:00
|
|
|
|
2014-05-14 19:13:50 +00:00
|
|
|
#define __STDC_FORMAT_MACROS
|
2014-02-26 22:16:23 +00:00
|
|
|
#include <inttypes.h>
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <algorithm>
|
2014-01-22 19:44:53 +00:00
|
|
|
#include <map>
|
2014-01-29 23:26:43 +00:00
|
|
|
#include <set>
|
2013-06-14 05:09:08 +00:00
|
|
|
#include <climits>
|
2014-02-28 00:18:23 +00:00
|
|
|
#include <unordered_map>
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <stdio.h>
|
2014-01-28 05:58:46 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "db/filename.h"
|
|
|
|
#include "db/log_reader.h"
|
|
|
|
#include "db/log_writer.h"
|
|
|
|
#include "db/memtable.h"
|
2013-12-03 02:34:05 +00:00
|
|
|
#include "db/merge_context.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "db/table_cache.h"
|
2014-01-16 00:22:34 +00:00
|
|
|
#include "db/compaction.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/merge_operator.h"
|
2014-01-28 05:58:46 +00:00
|
|
|
#include "table/table_reader.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "table/merger.h"
|
|
|
|
#include "table/two_level_iterator.h"
|
2014-02-14 00:28:21 +00:00
|
|
|
#include "table/format.h"
|
2014-04-25 19:23:07 +00:00
|
|
|
#include "table/plain_table_factory.h"
|
2014-02-14 00:28:21 +00:00
|
|
|
#include "table/meta_blocks.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/logging.h"
|
2013-06-05 18:06:21 +00:00
|
|
|
#include "util/stop_watch.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-07-17 20:56:24 +00:00
|
|
|
// Returns the sum of file_size over the leading run of non-null entries in
// "files".  Iteration stops at the first nullptr entry (some callers pass
// vectors whose tail is padded with nullptr).
static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  uint64_t total = 0;
  for (const auto* meta : files) {
    if (meta == nullptr) {
      break;  // preserve the original early-stop-at-null behavior
    }
    total += meta->file_size;
  }
  return total;
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Destroys a Version once its reference count has dropped to zero.
// Unlinks it from the VersionSet's doubly-linked version list, releases this
// version's reference on every file it points to, and hands files whose
// refcount hit zero to the VersionSet for later deletion.
Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < num_levels_; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        // Last reference: release the pinned table-cache entry (if any) and
        // queue the file for physical deletion by the VersionSet.
        if (f->table_reader_handle) {
          cfd_->table_cache()->ReleaseHandle(f->table_reader_handle);
          f->table_reader_handle = nullptr;
        }
        vset_->obsolete_files_.push_back(f);
      }
    }
  }
  // files_ is a heap-allocated array of per-level vectors (see constructor).
  delete[] files_;
}
|
|
|
|
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
int FindFileInRange(const InternalKeyComparator& icmp,
|
|
|
|
const std::vector<FileMetaData*>& files,
|
|
|
|
const Slice& key,
|
|
|
|
uint32_t left,
|
|
|
|
uint32_t right) {
|
2011-06-22 02:36:45 +00:00
|
|
|
while (left < right) {
|
|
|
|
uint32_t mid = (left + right) / 2;
|
|
|
|
const FileMetaData* f = files[mid];
|
|
|
|
if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
|
|
|
|
// Key at "mid.largest" is < "target". Therefore all
|
|
|
|
// files at or before "mid" are uninteresting.
|
|
|
|
left = mid + 1;
|
|
|
|
} else {
|
|
|
|
// Key at "mid.largest" is >= "target". Therefore all files
|
|
|
|
// after "mid" are uninteresting.
|
|
|
|
right = mid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return right;
|
|
|
|
}
|
|
|
|
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
// Searches the whole sorted, non-overlapping file list for the first file
// whose largest key is >= "key".  Returns files.size() if no file qualifies.
int FindFile(const InternalKeyComparator& icmp,
             const std::vector<FileMetaData*>& files,
             const Slice& key) {
  return FindFileInRange(icmp, files, key, 0, files.size());
}
|
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
static bool AfterFile(const Comparator* ucmp,
|
|
|
|
const Slice* user_key, const FileMetaData* f) {
|
2013-03-01 02:04:58 +00:00
|
|
|
// nullptr user_key occurs before all keys and is therefore never after *f
|
|
|
|
return (user_key != nullptr &&
|
2011-10-05 23:30:28 +00:00
|
|
|
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool BeforeFile(const Comparator* ucmp,
|
|
|
|
const Slice* user_key, const FileMetaData* f) {
|
2013-03-01 02:04:58 +00:00
|
|
|
// nullptr user_key occurs after all keys and is therefore never before *f
|
|
|
|
return (user_key != nullptr &&
|
2011-10-05 23:30:28 +00:00
|
|
|
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
|
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
bool SomeFileOverlapsRange(
|
|
|
|
const InternalKeyComparator& icmp,
|
2011-10-05 23:30:28 +00:00
|
|
|
bool disjoint_sorted_files,
|
2011-06-22 02:36:45 +00:00
|
|
|
const std::vector<FileMetaData*>& files,
|
2011-10-05 23:30:28 +00:00
|
|
|
const Slice* smallest_user_key,
|
|
|
|
const Slice* largest_user_key) {
|
|
|
|
const Comparator* ucmp = icmp.user_comparator();
|
|
|
|
if (!disjoint_sorted_files) {
|
|
|
|
// Need to check against all files
|
2012-08-27 06:45:35 +00:00
|
|
|
for (size_t i = 0; i < files.size(); i++) {
|
2011-10-05 23:30:28 +00:00
|
|
|
const FileMetaData* f = files[i];
|
|
|
|
if (AfterFile(ucmp, smallest_user_key, f) ||
|
|
|
|
BeforeFile(ucmp, largest_user_key, f)) {
|
|
|
|
// No overlap
|
|
|
|
} else {
|
|
|
|
return true; // Overlap
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Binary search over file list
|
|
|
|
uint32_t index = 0;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (smallest_user_key != nullptr) {
|
2011-10-05 23:30:28 +00:00
|
|
|
// Find the earliest possible internal key for smallest_user_key
|
|
|
|
InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
|
|
|
|
index = FindFile(icmp, files, small.Encode());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (index >= files.size()) {
|
|
|
|
// beginning of range is after all files, so no overlap.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return !BeforeFile(ucmp, largest_user_key, files[index]);
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
2014-04-02 01:36:18 +00:00
|
|
|
namespace {

// Used for LevelFileNumIterator to pass "block handle" value,
// which actually means file information in this iterator.
// It contains subset of fields of FileMetaData, that is sufficient
// for table cache to use.
//
// NOTE: instances are exposed as raw bytes via
// Slice(reinterpret_cast<const char*>(&v), sizeof(EncodedFileMetaData)) in
// LevelFileNumIterator::value() and decoded by the same cast in
// LevelFileIteratorState::NewSecondaryIterator(), so the field set and
// layout must stay in sync between producer and consumer.
struct EncodedFileMetaData {
  uint64_t number;     // file number
  uint64_t file_size;  // file size
  TableReader* table_reader;  // cached table reader
};

}  // namespace
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// An internal iterator. For a given version/level pair, yields
|
|
|
|
// information about the files in the level. For a given entry, key()
|
|
|
|
// is the largest key that occurs in the file, and value() is an
|
2011-03-28 20:43:44 +00:00
|
|
|
// 16-byte value containing the file number and file size, both
|
|
|
|
// encoded using EncodeFixed64.
|
2011-03-18 22:37:00 +00:00
|
|
|
// An internal iterator. For a given version/level pair, yields
// information about the files in the level. For a given entry, key()
// is the largest key that occurs in the file, and value() is an
// EncodedFileMetaData blob describing the file (number, size, cached
// table reader), suitable for handing to the table cache.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>* flist)
      : icmp_(icmp),
        flist_(flist),
        index_(flist->size()) {  // Marks as invalid
  }
  virtual bool Valid() const {
    return index_ < flist_->size();
  }
  // Positions the iterator at the first file whose largest key is >= target
  // (valid because files in a level > 0 are sorted and non-overlapping).
  virtual void Seek(const Slice& target) {
    index_ = FindFile(icmp_, *flist_, target);
  }
  virtual void SeekToFirst() { index_ = 0; }
  virtual void SeekToLast() {
    index_ = flist_->empty() ? 0 : flist_->size() - 1;
  }
  virtual void Next() {
    assert(Valid());
    index_++;
  }
  virtual void Prev() {
    assert(Valid());
    if (index_ == 0) {
      index_ = flist_->size();  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const {
    assert(Valid());
    return (*flist_)[index_]->largest.Encode();
  }
  // Returns the raw bytes of an EncodedFileMetaData for the current file.
  // The returned Slice points into current_value_, so it is only valid
  // until the next call to value() or until the iterator is destroyed.
  Slice value() const {
    assert(Valid());
    auto* file_meta = (*flist_)[index_];
    current_value_.number = file_meta->number;
    current_value_.file_size = file_meta->file_size;
    current_value_.table_reader = file_meta->table_reader;
    return Slice(reinterpret_cast<const char*>(&current_value_),
                 sizeof(EncodedFileMetaData));
  }
  virtual Status status() const { return Status::OK(); }

 private:
  const InternalKeyComparator icmp_;
  const std::vector<FileMetaData*>* const flist_;
  uint32_t index_;
  // Scratch buffer backing the Slice returned by value(); mutable because
  // value() is const.
  mutable EncodedFileMetaData current_value_;
};
|
|
|
|
|
2014-04-25 19:22:23 +00:00
|
|
|
// State object for the two-level iterator over a level > 0: translates the
// "block handle" values produced by LevelFileNumIterator (raw
// EncodedFileMetaData bytes) into per-file table iterators obtained from
// the table cache.
class Version::LevelFileIteratorState : public TwoLevelIteratorState {
 public:
  LevelFileIteratorState(TableCache* table_cache,
      const ReadOptions& read_options, const EnvOptions& env_options,
      const InternalKeyComparator& icomparator, bool for_compaction,
      bool prefix_enabled)
      : TwoLevelIteratorState(prefix_enabled),
        table_cache_(table_cache), read_options_(read_options),
        env_options_(env_options), icomparator_(icomparator),
        for_compaction_(for_compaction) {}

  // "meta_handle" must be exactly the bytes of an EncodedFileMetaData as
  // written by LevelFileNumIterator::value(); any other size yields an
  // error iterator rather than a crash.
  Iterator* NewSecondaryIterator(const Slice& meta_handle) override {
    if (meta_handle.size() != sizeof(EncodedFileMetaData)) {
      return NewErrorIterator(
          Status::Corruption("FileReader invoked with unexpected value"));
    } else {
      const EncodedFileMetaData* encoded_meta =
          reinterpret_cast<const EncodedFileMetaData*>(meta_handle.data());
      // Rebuild a minimal FileMetaData for the table cache from the
      // encoded subset of fields.
      FileMetaData meta(encoded_meta->number, encoded_meta->file_size);
      meta.table_reader = encoded_meta->table_reader;
      return table_cache_->NewIterator(read_options_, env_options_,
          icomparator_, meta, nullptr /* don't need reference to table*/,
          for_compaction_);
    }
  }

  // No prefix-based pruning is done at the file level here; always allow.
  bool PrefixMayMatch(const Slice& internal_key) override {
    return true;
  }

 private:
  TableCache* table_cache_;
  const ReadOptions read_options_;
  const EnvOptions& env_options_;
  const InternalKeyComparator& icomparator_;
  bool for_compaction_;
};
|
2013-08-23 21:49:57 +00:00
|
|
|
|
2014-02-14 00:28:21 +00:00
|
|
|
// Collects TableProperties for every table file referenced by this version
// and inserts them into *props keyed by full file path.
//
// For each file, first attempts a no-IO lookup through the table cache;
// only when the table is not already cached does it open the file and read
// the properties block directly.  Returns non-OK on the first hard error
// (an Incomplete status from the cache lookup is expected and handled).
Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
  auto table_cache = cfd_->table_cache();
  auto options = cfd_->options();
  for (int level = 0; level < num_levels_; level++) {
    for (const auto& file_meta : files_[level]) {
      auto fname = TableFileName(vset_->dbname_, file_meta->number);
      // 1. If the table is already present in table cache, load table
      // properties from there.
      std::shared_ptr<const TableProperties> table_properties;
      Status s = table_cache->GetTableProperties(
          vset_->storage_options_, cfd_->internal_comparator(), *file_meta,
          &table_properties, true /* no io */);
      if (s.ok()) {
        props->insert({fname, table_properties});
        continue;
      }

      // We only ignore error type `Incomplete` since it's by design that we
      // disallow table when it's not in table cache.
      if (!s.IsIncomplete()) {
        return s;
      }

      // 2. Table is not present in table cache, we'll read the table properties
      // directly from the properties block in the file.
      std::unique_ptr<RandomAccessFile> file;
      s = options->env->NewRandomAccessFile(fname, &file,
                                            vset_->storage_options_);
      if (!s.ok()) {
        return s;
      }

      TableProperties* raw_table_properties;
      // By setting the magic number to kInvalidTableMagicNumber, we can by
      // pass the magic number check in the footer.
      s = ReadTableProperties(
          file.get(), file_meta->file_size,
          Footer::kInvalidTableMagicNumber /* table's magic number */,
          vset_->env_, options->info_log.get(), &raw_table_properties);
      if (!s.ok()) {
        return s;
      }
      // Count direct (uncached) property loads for monitoring.
      RecordTick(options->statistics.get(),
                 NUMBER_DIRECT_LOAD_TABLE_PROPERTIES);

      // *props takes ownership of raw_table_properties via shared_ptr.
      props->insert({fname, std::shared_ptr<const TableProperties>(
                                raw_table_properties)});
    }
  }

  return Status::OK();
}
|
|
|
|
|
2014-04-25 19:23:07 +00:00
|
|
|
// Appends to *iters the iterators needed to read every key in this version:
// one table iterator per level-0 file (L0 files may overlap, so each gets
// its own), plus one lazy concatenating two-level iterator per non-empty
// level > 0.
void Version::AddIterators(const ReadOptions& read_options,
                           const EnvOptions& soptions,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (const FileMetaData* file : files_[0]) {
    iters->push_back(cfd_->table_cache()->NewIterator(
        read_options, soptions, cfd_->internal_comparator(), *file));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < num_levels_; level++) {
    if (!files_[level].empty()) {
      iters->push_back(NewTwoLevelIterator(new LevelFileIteratorState(
          cfd_->table_cache(), read_options, soptions,
          cfd_->internal_comparator(), false /* for_compaction */,
          cfd_->options()->prefix_extractor != nullptr),
        new LevelFileNumIterator(cfd_->internal_comparator(), &files_[level])));
    }
  }
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Callback from TableCache::Get()
namespace {
// Progress/result of a point lookup as it is threaded through the
// SaveValue() and MarkKeyMayExist() callbacks below.
enum SaverState {
  kNotFound,  // no entry for the key has been seen yet
  kFound,     // a value was found (also set by MarkKeyMayExist)
  kDeleted,   // a deletion entry was found for the key
  kCorrupt,   // a FullMerge call failed
  kMerge      // saver contains the current merge result (the operands)
};
// Mutable lookup context shared between the Get path and the callbacks.
struct Saver {
  SaverState state;
  const Comparator* ucmp;   // user-key comparator used to match entries
  Slice user_key;           // the key being looked up
  bool* value_found;  // Is value set correctly? Used by KeyMayExist
  std::string* value;       // output buffer for the found value
  const MergeOperator* merge_operator;
  // the merge operations encountered;
  MergeContext* merge_context;
  Logger* logger;
  bool didIO;    // did we do any disk io?
  Statistics* statistics;
};
}  // anonymous namespace
|
2013-07-06 01:49:18 +00:00
|
|
|
|
2013-10-29 00:54:09 +00:00
|
|
|
// Called from TableCache::Get and Table::Get when file/block in which
|
|
|
|
// key may exist are not there in TableCache/BlockCache respectively. In this
|
|
|
|
// case we can't guarantee that key does not exist and are not permitted to do
|
|
|
|
// IO to be certain.Set the status=kFound and value_found=false to let the
|
|
|
|
// caller know that key may exist but is not there in memory
|
2013-07-06 01:49:18 +00:00
|
|
|
static void MarkKeyMayExist(void* arg) {
|
|
|
|
Saver* s = reinterpret_cast<Saver*>(arg);
|
|
|
|
s->state = kFound;
|
2013-07-26 19:57:01 +00:00
|
|
|
if (s->value_found != nullptr) {
|
|
|
|
*(s->value_found) = false;
|
|
|
|
}
|
2013-07-06 01:49:18 +00:00
|
|
|
}
|
|
|
|
|
2014-01-27 21:53:22 +00:00
|
|
|
// Callback invoked for each matching entry found while scanning tables
// during a Get.  Drives the Saver state machine in *arg:
//   - kTypeValue: either record the value (kNotFound -> kFound) or, if
//     merge operands were accumulated, apply FullMerge with this value as
//     the base (kMerge -> kFound, or kCorrupt on merge failure).
//   - kTypeDeletion: kNotFound -> kDeleted, or finish a pending merge with
//     a nullptr base value.
//   - kTypeMerge: push the operand and keep scanning.
// Returns true to continue the scan (only for merge operands), false once
// the lookup is resolved.
static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                      const Slice& v, bool didIO) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  MergeContext* merge_contex = s->merge_context;
  std::string merge_result;  // temporary area for merge results later

  assert(s != nullptr && merge_contex != nullptr);

  // TODO: didIO and Merge?
  s->didIO = didIO;
  if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
    // Key matches. Process it
    switch (parsed_key.type) {
      case kTypeValue:
        if (kNotFound == s->state) {
          // First (newest) entry for the key: take the value as-is.
          s->state = kFound;
          s->value->assign(v.data(), v.size());
        } else if (kMerge == s->state) {
          // We hit the base value for previously collected merge operands:
          // fold them together now.
          assert(s->merge_operator != nullptr);
          s->state = kFound;
          if (!s->merge_operator->FullMerge(s->user_key, &v,
                                            merge_contex->GetOperands(),
                                            s->value, s->logger)) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            s->state = kCorrupt;
          }
        } else {
          assert(false);
        }
        return false;

      case kTypeDeletion:
        if (kNotFound == s->state) {
          s->state = kDeleted;
        } else if (kMerge == s->state) {
          // Deletion terminates the merge chain: merge with no base value.
          s->state = kFound;
          if (!s->merge_operator->FullMerge(s->user_key, nullptr,
                                            merge_contex->GetOperands(),
                                            s->value, s->logger)) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            s->state = kCorrupt;
          }
        } else {
          assert(false);
        }
        return false;

      case kTypeMerge:
        // Accumulate the operand and keep looking for an older base entry.
        assert(s->state == kNotFound || s->state == kMerge);
        s->state = kMerge;
        merge_contex->PushOperand(v);
        return true;

      default:
        assert(false);
        break;
    }
  }

  // s->state could be Corrupt, merge or notfound
  return false;
}
|
|
|
|
|
2014-03-26 20:30:14 +00:00
|
|
|
namespace {
|
|
|
|
// Orders files by descending file number; higher numbers are assigned to
// newer files, so this sorts newest first.
bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return a->number > b->number;
}
|
2014-03-26 20:30:14 +00:00
|
|
|
// Orders files newest-first by sequence numbers: descending smallest_seqno,
// then descending largest_seqno, finally falling back to NewestFirst
// (descending file number) as the last tie-breaker.
bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b) {
  if (a->smallest_seqno == b->smallest_seqno) {
    if (a->largest_seqno == b->largest_seqno) {
      // Break ties by file number
      return NewestFirst(a, b);
    }
    return a->largest_seqno > b->largest_seqno;
  }
  return a->smallest_seqno > b->smallest_seqno;
}
|
2014-03-26 20:30:14 +00:00
|
|
|
// Orders files by ascending smallest key under *cmp, breaking ties with
// ascending file number.
bool BySmallestKey(FileMetaData* a, FileMetaData* b,
                   const InternalKeyComparator* cmp) {
  const int ord = cmp->Compare(a->smallest, b->smallest);
  if (ord == 0) {
    // Break ties by file number
    return a->number < b->number;
  }
  return ord < 0;
}
|
2014-03-26 20:30:14 +00:00
|
|
|
} // anonymous namespace
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
// Construct a Version for column family `cfd` under version set `vset`.
// `cfd` may be nullptr, in which case this is the dummy/sentinel Version
// (every cached pointer below is then nullptr and num_levels_ is 0).
// `version_number` is an identifier assigned by the caller.
Version::Version(ColumnFamilyData* cfd, VersionSet* vset,
                 uint64_t version_number)
    : cfd_(cfd),
      // Cache frequently used objects from the column family so hot paths
      // (e.g. Get()) avoid repeated indirection through cfd_->options().
      internal_comparator_((cfd == nullptr) ? nullptr
                                            : &cfd->internal_comparator()),
      user_comparator_((cfd == nullptr)
                           ? nullptr
                           : internal_comparator_->user_comparator()),
      table_cache_((cfd == nullptr) ? nullptr : cfd->table_cache()),
      merge_operator_((cfd == nullptr) ? nullptr
                                       : cfd->options()->merge_operator.get()),
      info_log_((cfd == nullptr) ? nullptr : cfd->options()->info_log.get()),
      db_statistics_((cfd == nullptr) ? nullptr
                                      : cfd->options()->statistics.get()),
      vset_(vset),
      // Self-linked next_/prev_: starts as a stand-alone node, presumably
      // spliced into a version list later — confirm against VersionSet.
      next_(this),
      prev_(this),
      refs_(0),
      // cfd is nullptr if Version is dummy
      num_levels_(cfd == nullptr ? 0 : cfd->NumberLevels()),
      // Raw owning array of per-level file lists; assumed released with
      // delete[] in ~Version (destructor not visible here) — TODO confirm.
      files_(new std::vector<FileMetaData*>[num_levels_]),
      files_by_size_(num_levels_),
      next_file_to_compact_by_size_(num_levels_),
      // No seek-triggered compaction candidate yet.
      file_to_compact_(nullptr),
      file_to_compact_level_(-1),
      compaction_score_(num_levels_),
      compaction_level_(num_levels_),
      version_number_(version_number),
      // FileIndexer accelerates Get() by narrowing per-level search bounds;
      // it needs the user comparator (nullptr for the dummy Version).
      file_indexer_(num_levels_, cfd == nullptr ? nullptr
                        : cfd->internal_comparator().user_comparator()) {
}
|
2012-06-23 02:30:03 +00:00
|
|
|
|
2013-03-21 22:59:47 +00:00
|
|
|
void Version::Get(const ReadOptions& options,
|
|
|
|
const LookupKey& k,
|
|
|
|
std::string* value,
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
Status* status,
|
2013-12-03 02:34:05 +00:00
|
|
|
MergeContext* merge_context,
|
2013-03-21 22:59:47 +00:00
|
|
|
GetStats* stats,
|
2013-07-26 19:57:01 +00:00
|
|
|
bool* value_found) {
|
2011-06-22 02:36:45 +00:00
|
|
|
Slice ikey = k.internal_key();
|
|
|
|
Slice user_key = k.user_key();
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
assert(status->ok() || status->IsMergeInProgress());
|
|
|
|
Saver saver;
|
|
|
|
saver.state = status->ok()? kNotFound : kMerge;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.ucmp = user_comparator_;
|
2013-03-21 22:59:47 +00:00
|
|
|
saver.user_key = user_key;
|
2013-07-26 19:57:01 +00:00
|
|
|
saver.value_found = value_found;
|
2013-03-21 22:59:47 +00:00
|
|
|
saver.value = value;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.merge_operator = merge_operator_;
|
2013-12-03 02:34:05 +00:00
|
|
|
saver.merge_context = merge_context;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.logger = info_log_;
|
2013-03-21 22:59:47 +00:00
|
|
|
saver.didIO = false;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.statistics = db_statistics_;
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2013-03-01 02:04:58 +00:00
|
|
|
stats->seek_file = nullptr;
|
2011-06-22 02:36:45 +00:00
|
|
|
stats->seek_file_level = -1;
|
2013-03-01 02:04:58 +00:00
|
|
|
FileMetaData* last_file_read = nullptr;
|
2011-09-01 19:08:02 +00:00
|
|
|
int last_file_read_level = -1;
|
2011-06-22 02:36:45 +00:00
|
|
|
|
|
|
|
// We can search level-by-level since entries never hop across
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparison that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending on the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some initial results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
// levels. Therefore we are guaranteed that if we find data
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
// in an smaller level, later levels are irrelevant (unless we
|
|
|
|
// are MergeInProgress).
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
|
|
|
|
int32_t search_left_bound = 0;
|
|
|
|
int32_t search_right_bound = FileIndexer::kLevelMaxIndex;
|
|
|
|
for (int level = 0; level < num_levels_; ++level) {
|
|
|
|
int num_files = files_[level].size();
|
|
|
|
if (num_files == 0) {
|
|
|
|
// When current level is empty, the search bound generated from upper
|
|
|
|
// level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
|
|
|
|
// also empty.
|
|
|
|
assert(search_left_bound == 0);
|
|
|
|
assert(search_right_bound == -1 ||
|
|
|
|
search_right_bound == FileIndexer::kLevelMaxIndex);
|
|
|
|
// Since current level is empty, it will need to search all files in the
|
|
|
|
// next level
|
|
|
|
search_left_bound = 0;
|
|
|
|
search_right_bound = FileIndexer::kLevelMaxIndex;
|
|
|
|
continue;
|
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
|
|
|
|
// Get the list of files to search in this level
|
|
|
|
FileMetaData* const* files = &files_[level][0];
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
|
|
|
|
// Some files may overlap each other. We find
|
|
|
|
// all files that overlap user_key and process them in order from
|
|
|
|
// newest to oldest. In the context of merge-operator,
|
|
|
|
// this can occur at any level. Otherwise, it only occurs
|
|
|
|
// at Level-0 (since Put/Deletes are always compacted into a single entry).
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
int32_t start_index;
|
2011-06-22 02:36:45 +00:00
|
|
|
if (level == 0) {
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
// On Level-0, we read through all files to check for overlap.
|
|
|
|
start_index = 0;
|
2011-06-22 02:36:45 +00:00
|
|
|
} else {
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
// On Level-n (n>=1), files are sorted. Binary search to find the earliest
|
|
|
|
// file whose largest key >= ikey. Search left bound and right bound are
|
|
|
|
// used to narrow the range.
|
|
|
|
if (search_left_bound == search_right_bound) {
|
|
|
|
start_index = search_left_bound;
|
|
|
|
} else if (search_left_bound < search_right_bound) {
|
|
|
|
if (search_right_bound == FileIndexer::kLevelMaxIndex) {
|
|
|
|
search_right_bound = num_files - 1;
|
|
|
|
}
|
|
|
|
start_index = FindFileInRange(cfd_->internal_comparator(),
|
|
|
|
files_[level], ikey, search_left_bound, search_right_bound);
|
|
|
|
} else {
|
|
|
|
// search_left_bound > search_right_bound, key does not exist in this
|
|
|
|
// level. Since no comparision is done in this level, it will need to
|
|
|
|
// search all files in the next level.
|
|
|
|
search_left_bound = 0;
|
|
|
|
search_right_bound = FileIndexer::kLevelMaxIndex;
|
|
|
|
continue;
|
|
|
|
}
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
}
|
2013-12-09 22:28:26 +00:00
|
|
|
// Traverse each relevant file to find the desired key
|
|
|
|
#ifndef NDEBUG
|
|
|
|
FileMetaData* prev_file = nullptr;
|
|
|
|
#endif
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
|
|
|
|
for (int32_t i = start_index; i < num_files;) {
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
FileMetaData* f = files[i];
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
// Check if key is within a file's range. If search left bound and right
|
|
|
|
// bound point to the same find, we are sure key falls in range.
|
|
|
|
assert(level == 0 || i == start_index ||
|
|
|
|
user_comparator_->Compare(user_key, f->smallest.user_key()) <= 0);
|
|
|
|
|
|
|
|
int cmp_smallest = user_comparator_->Compare(user_key, f->smallest.user_key());
|
|
|
|
int cmp_largest = -1;
|
|
|
|
if (cmp_smallest >= 0) {
|
|
|
|
cmp_largest = user_comparator_->Compare(user_key, f->largest.user_key());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup file search bound for the next level based on the comparison
|
|
|
|
// results
|
|
|
|
if (level > 0) {
|
|
|
|
file_indexer_.GetNextLevelIndex(level, i, cmp_smallest, cmp_largest,
|
|
|
|
&search_left_bound, &search_right_bound);
|
|
|
|
}
|
|
|
|
// Key falls out of current file's range
|
|
|
|
if (cmp_smallest < 0 || cmp_largest > 0) {
|
|
|
|
if (level == 0) {
|
|
|
|
++i;
|
|
|
|
continue;
|
|
|
|
} else {
|
2013-12-09 22:28:26 +00:00
|
|
|
break;
|
|
|
|
}
|
2013-06-14 05:09:08 +00:00
|
|
|
}
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
#ifndef NDEBUG
|
2013-12-09 22:28:26 +00:00
|
|
|
// Sanity check to make sure that the files are correctly sorted
|
|
|
|
if (prev_file) {
|
|
|
|
if (level != 0) {
|
2014-04-17 21:07:05 +00:00
|
|
|
int comp_sign =
|
|
|
|
internal_comparator_->Compare(prev_file->largest, f->smallest);
|
2013-12-09 22:28:26 +00:00
|
|
|
assert(comp_sign < 0);
|
|
|
|
} else {
|
|
|
|
// level == 0, the current file cannot be newer than the previous one.
|
2014-01-31 23:30:27 +00:00
|
|
|
if (cfd_->options()->compaction_style == kCompactionStyleUniversal) {
|
2013-12-09 22:28:26 +00:00
|
|
|
assert(!NewestFirstBySeqNo(f, prev_file));
|
|
|
|
} else {
|
|
|
|
assert(!NewestFirst(f, prev_file));
|
|
|
|
}
|
|
|
|
}
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
}
|
2013-12-09 22:28:26 +00:00
|
|
|
prev_file = f;
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
#endif
|
2012-09-27 08:05:38 +00:00
|
|
|
bool tableIO = false;
|
2014-04-17 21:07:05 +00:00
|
|
|
*status = table_cache_->Get(options, *internal_comparator_, *f, ikey,
|
|
|
|
&saver, SaveValue, &tableIO, MarkKeyMayExist);
|
2013-03-21 22:59:47 +00:00
|
|
|
// TODO: examine the behavior for corrupted key
|
|
|
|
if (!status->ok()) {
|
|
|
|
return;
|
2012-04-17 15:36:46 +00:00
|
|
|
}
|
2012-09-27 08:05:38 +00:00
|
|
|
|
2013-03-01 02:04:58 +00:00
|
|
|
if (last_file_read != nullptr && stats->seek_file == nullptr) {
|
2012-09-27 08:05:38 +00:00
|
|
|
// We have had more than one seek for this read. Charge the 1st file.
|
|
|
|
stats->seek_file = last_file_read;
|
|
|
|
stats->seek_file_level = last_file_read_level;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we did any IO as part of the read, then we remember it because
|
|
|
|
// it is a possible candidate for seek-based compaction. saver.didIO
|
|
|
|
// is true if the block had to be read in from storage and was not
|
|
|
|
// pre-exisiting in the block cache. Also, if this file was not pre-
|
|
|
|
// existing in the table cache and had to be freshly opened that needed
|
|
|
|
// the index blocks to be read-in, then tableIO is true. One thing
|
|
|
|
// to note is that the index blocks are not part of the block cache.
|
|
|
|
if (saver.didIO || tableIO) {
|
|
|
|
last_file_read = f;
|
|
|
|
last_file_read_level = level;
|
|
|
|
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
switch (saver.state) {
|
|
|
|
case kNotFound:
|
|
|
|
break; // Keep searching in other files
|
|
|
|
case kFound:
|
2013-03-21 22:59:47 +00:00
|
|
|
return;
|
2012-04-17 15:36:46 +00:00
|
|
|
case kDeleted:
|
2013-12-26 21:49:04 +00:00
|
|
|
*status = Status::NotFound(); // Use empty error message for speed
|
2013-03-21 22:59:47 +00:00
|
|
|
return;
|
2012-04-17 15:36:46 +00:00
|
|
|
case kCorrupt:
|
2013-03-21 22:59:47 +00:00
|
|
|
*status = Status::Corruption("corrupted key for ", user_key);
|
|
|
|
return;
|
|
|
|
case kMerge:
|
|
|
|
break;
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
if (level > 0 && cmp_largest < 0) {
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
++i;
|
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
if (kMerge == saver.state) {
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
// merge_operands are in saver and we hit the beginning of the key history
|
|
|
|
// do a final merge of nullptr and operands;
|
2014-04-17 21:07:05 +00:00
|
|
|
if (merge_operator_->FullMerge(user_key, nullptr,
|
|
|
|
saver.merge_context->GetOperands(), value,
|
|
|
|
info_log_)) {
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
*status = Status::OK();
|
|
|
|
} else {
|
2014-04-17 21:07:05 +00:00
|
|
|
RecordTick(db_statistics_, NUMBER_MERGE_FAILURES);
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
*status = Status::Corruption("could not perform end-of-key merge for ",
|
|
|
|
user_key);
|
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
} else {
|
2013-12-26 21:49:04 +00:00
|
|
|
*status = Status::NotFound(); // Use an empty error message for speed
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool Version::UpdateStats(const GetStats& stats) {
|
|
|
|
FileMetaData* f = stats.seek_file;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (f != nullptr) {
|
2011-06-22 02:36:45 +00:00
|
|
|
f->allowed_seeks--;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
|
2011-06-22 02:36:45 +00:00
|
|
|
file_to_compact_ = f;
|
|
|
|
file_to_compact_level_ = stats.seek_file_level;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-03-19 23:52:26 +00:00
|
|
|
void Version::ComputeCompactionScore(
|
|
|
|
std::vector<uint64_t>& size_being_compacted) {
|
2014-01-16 00:23:36 +00:00
|
|
|
double max_score = 0;
|
|
|
|
int max_score_level = 0;
|
|
|
|
|
|
|
|
int num_levels_to_check =
|
2014-01-31 23:30:27 +00:00
|
|
|
(cfd_->options()->compaction_style != kCompactionStyleUniversal)
|
2014-01-16 00:23:36 +00:00
|
|
|
? NumberLevels() - 1
|
|
|
|
: 1;
|
|
|
|
|
|
|
|
for (int level = 0; level < num_levels_to_check; level++) {
|
|
|
|
double score;
|
|
|
|
if (level == 0) {
|
|
|
|
// We treat level-0 specially by bounding the number of files
|
|
|
|
// instead of number of bytes for two reasons:
|
|
|
|
//
|
|
|
|
// (1) With larger write-buffer sizes, it is nice not to do too
|
|
|
|
// many level-0 compactions.
|
|
|
|
//
|
|
|
|
// (2) The files in level-0 are merged on every read and
|
|
|
|
// therefore we wish to avoid too many files when the individual
|
|
|
|
// file size is small (perhaps because of a small write-buffer
|
|
|
|
// setting, or very high compression ratios, or lots of
|
|
|
|
// overwrites/deletions).
|
|
|
|
int numfiles = 0;
|
|
|
|
for (unsigned int i = 0; i < files_[level].size(); i++) {
|
|
|
|
if (!files_[level][i]->being_compacted) {
|
|
|
|
numfiles++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are slowing down writes, then we better compact that first
|
2014-01-31 23:30:27 +00:00
|
|
|
if (numfiles >= cfd_->options()->level0_stop_writes_trigger) {
|
2014-01-16 00:23:36 +00:00
|
|
|
score = 1000000;
|
2014-01-31 23:30:27 +00:00
|
|
|
} else if (numfiles >= cfd_->options()->level0_slowdown_writes_trigger) {
|
2014-01-16 00:23:36 +00:00
|
|
|
score = 10000;
|
|
|
|
} else {
|
|
|
|
score = static_cast<double>(numfiles) /
|
2014-01-31 23:30:27 +00:00
|
|
|
cfd_->options()->level0_file_num_compaction_trigger;
|
2014-01-16 00:23:36 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Compute the ratio of current size to size limit.
|
|
|
|
const uint64_t level_bytes =
|
|
|
|
TotalFileSize(files_[level]) - size_being_compacted[level];
|
2014-01-31 23:30:27 +00:00
|
|
|
score = static_cast<double>(level_bytes) /
|
|
|
|
cfd_->compaction_picker()->MaxBytesForLevel(level);
|
2014-01-16 00:23:36 +00:00
|
|
|
if (max_score < score) {
|
|
|
|
max_score = score;
|
|
|
|
max_score_level = level;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
compaction_level_[level] = level;
|
|
|
|
compaction_score_[level] = score;
|
|
|
|
}
|
|
|
|
|
|
|
|
// update the max compaction score in levels 1 to n-1
|
|
|
|
max_compaction_score_ = max_score;
|
|
|
|
max_compaction_score_level_ = max_score_level;
|
|
|
|
|
|
|
|
// sort all the levels based on their score. Higher scores get listed
|
|
|
|
// first. Use bubble sort because the number of entries are small.
|
|
|
|
for (int i = 0; i < NumberLevels() - 2; i++) {
|
|
|
|
for (int j = i + 1; j < NumberLevels() - 1; j++) {
|
|
|
|
if (compaction_score_[i] < compaction_score_[j]) {
|
|
|
|
double score = compaction_score_[i];
|
|
|
|
int level = compaction_level_[i];
|
|
|
|
compaction_score_[i] = compaction_score_[j];
|
|
|
|
compaction_level_[i] = compaction_level_[j];
|
|
|
|
compaction_score_[j] = score;
|
|
|
|
compaction_level_[j] = level;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Compator that is used to sort files based on their size
|
|
|
|
// In normal mode: descending size
|
|
|
|
bool CompareSizeDescending(const Version::Fsize& first,
|
|
|
|
const Version::Fsize& second) {
|
|
|
|
return (first.file->file_size > second.file->file_size);
|
|
|
|
}
|
|
|
|
// A static compator used to sort files based on their seqno
|
|
|
|
// In universal style : descending seqno
|
|
|
|
bool CompareSeqnoDescending(const Version::Fsize& first,
|
|
|
|
const Version::Fsize& second) {
|
|
|
|
if (first.file->smallest_seqno > second.file->smallest_seqno) {
|
|
|
|
assert(first.file->largest_seqno > second.file->largest_seqno);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
assert(first.file->largest_seqno <= second.file->largest_seqno);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-01-16 07:12:31 +00:00
|
|
|
} // anonymous namespace
|
2014-01-16 00:23:36 +00:00
|
|
|
|
|
|
|
void Version::UpdateFilesBySize() {
|
|
|
|
// No need to sort the highest level because it is never compacted.
|
|
|
|
int max_level =
|
2014-01-31 23:30:27 +00:00
|
|
|
(cfd_->options()->compaction_style == kCompactionStyleUniversal)
|
2014-01-16 00:23:36 +00:00
|
|
|
? NumberLevels()
|
|
|
|
: NumberLevels() - 1;
|
|
|
|
|
|
|
|
for (int level = 0; level < max_level; level++) {
|
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
|
|
|
std::vector<int>& files_by_size = files_by_size_[level];
|
|
|
|
assert(files_by_size.size() == 0);
|
|
|
|
|
|
|
|
// populate a temp vector for sorting based on size
|
|
|
|
std::vector<Fsize> temp(files.size());
|
|
|
|
for (unsigned int i = 0; i < files.size(); i++) {
|
|
|
|
temp[i].index = i;
|
|
|
|
temp[i].file = files[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
// sort the top number_of_files_to_sort_ based on file size
|
2014-01-31 23:30:27 +00:00
|
|
|
if (cfd_->options()->compaction_style == kCompactionStyleUniversal) {
|
2014-01-16 00:23:36 +00:00
|
|
|
int num = temp.size();
|
|
|
|
std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
|
|
|
|
CompareSeqnoDescending);
|
|
|
|
} else {
|
|
|
|
int num = Version::number_of_files_to_sort_;
|
|
|
|
if (num > (int)temp.size()) {
|
|
|
|
num = temp.size();
|
|
|
|
}
|
|
|
|
std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
|
|
|
|
CompareSizeDescending);
|
|
|
|
}
|
|
|
|
assert(temp.size() == files.size());
|
|
|
|
|
|
|
|
// initialize files_by_size_
|
|
|
|
for (unsigned int i = 0; i < temp.size(); i++) {
|
|
|
|
files_by_size.push_back(temp[i].index);
|
|
|
|
}
|
|
|
|
next_file_to_compact_by_size_[level] = 0;
|
|
|
|
assert(files_[level].size() == files_by_size_[level].size());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
void Version::Ref() {
|
|
|
|
++refs_;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:56:36 +00:00
|
|
|
bool Version::Unref() {
|
2011-03-18 22:37:00 +00:00
|
|
|
assert(refs_ >= 1);
|
|
|
|
--refs_;
|
|
|
|
if (refs_ == 0) {
|
2011-05-21 02:17:43 +00:00
|
|
|
delete this;
|
2013-12-11 19:56:36 +00:00
|
|
|
return true;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2013-12-11 19:56:36 +00:00
|
|
|
return false;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-27 17:59:00 +00:00
|
|
|
bool Version::NeedsCompaction() const {
|
|
|
|
if (file_to_compact_ != nullptr) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
// In universal compaction case, this check doesn't really
|
|
|
|
// check the compaction condition, but checks num of files threshold
|
|
|
|
// only. We are not going to miss any compaction opportunity
|
|
|
|
// but it's likely that more compactions are scheduled but
|
|
|
|
// ending up with nothing to do. We can improve it later.
|
|
|
|
// TODO(sdong): improve this function to be accurate for universal
|
|
|
|
// compactions.
|
|
|
|
int num_levels_to_check =
|
2014-01-31 23:30:27 +00:00
|
|
|
(cfd_->options()->compaction_style != kCompactionStyleUniversal)
|
|
|
|
? NumberLevels() - 1
|
|
|
|
: 1;
|
2014-01-27 17:59:00 +00:00
|
|
|
for (int i = 0; i < num_levels_to_check; i++) {
|
|
|
|
if (compaction_score_[i] >= 1) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
bool Version::OverlapInLevel(int level,
|
2011-10-05 23:30:28 +00:00
|
|
|
const Slice* smallest_user_key,
|
|
|
|
const Slice* largest_user_key) {
|
2014-01-31 23:30:27 +00:00
|
|
|
return SomeFileOverlapsRange(cfd_->internal_comparator(), (level > 0),
|
|
|
|
files_[level], smallest_user_key,
|
|
|
|
largest_user_key);
|
2011-10-05 23:30:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int Version::PickLevelForMemTableOutput(
|
|
|
|
const Slice& smallest_user_key,
|
|
|
|
const Slice& largest_user_key) {
|
|
|
|
int level = 0;
|
|
|
|
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
|
|
|
|
// Push to next level if there is no overlap in next level,
|
|
|
|
// and the #bytes overlapping in the level after that are limited.
|
|
|
|
InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
|
|
|
|
InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
|
|
|
|
std::vector<FileMetaData*> overlaps;
|
2014-01-31 23:30:27 +00:00
|
|
|
int max_mem_compact_level = cfd_->options()->max_mem_compaction_level;
|
2012-06-23 02:30:03 +00:00
|
|
|
while (max_mem_compact_level > 0 && level < max_mem_compact_level) {
|
2011-10-05 23:30:28 +00:00
|
|
|
if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
|
|
|
|
break;
|
|
|
|
}
|
2014-01-16 00:15:43 +00:00
|
|
|
if (level + 2 >= num_levels_) {
|
2012-10-31 18:47:18 +00:00
|
|
|
level++;
|
|
|
|
break;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
2011-10-05 23:30:28 +00:00
|
|
|
GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
|
2013-07-17 20:56:24 +00:00
|
|
|
const uint64_t sum = TotalFileSize(overlaps);
|
2014-01-31 23:30:27 +00:00
|
|
|
if (sum > cfd_->compaction_picker()->MaxGrandParentOverlapBytes(level)) {
|
2011-10-05 23:30:28 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
level++;
|
|
|
|
}
|
|
|
|
}
|
2012-06-23 02:30:03 +00:00
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
return level;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
2012-11-29 00:42:36 +00:00
|
|
|
// If hint_index is specified, then it points to a file in the
|
2012-11-06 17:06:16 +00:00
|
|
|
// overlapping range.
|
|
|
|
// The file_index returns a pointer to any file in an overlapping range.
|
2014-01-10 23:12:34 +00:00
|
|
|
void Version::GetOverlappingInputs(int level,
|
|
|
|
const InternalKey* begin,
|
|
|
|
const InternalKey* end,
|
|
|
|
std::vector<FileMetaData*>* inputs,
|
|
|
|
int hint_index,
|
|
|
|
int* file_index) {
|
2011-10-05 23:30:28 +00:00
|
|
|
inputs->clear();
|
|
|
|
Slice user_begin, user_end;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr) {
|
2011-10-05 23:30:28 +00:00
|
|
|
user_begin = begin->user_key();
|
|
|
|
}
|
2013-03-01 02:04:58 +00:00
|
|
|
if (end != nullptr) {
|
2011-10-05 23:30:28 +00:00
|
|
|
user_end = end->user_key();
|
|
|
|
}
|
Assertion failure while running with unit tests with OPT=-g
Summary:
When we expand the range of keys for a level 0 compaction, we
need to invoke ParentFilesInCompaction() only once for the
entire range of keys that is being compacted. We were invoking
it for each file that was being compacted, but this triggers
an assertion because each file's range were contiguous but
non-overlapping.
I renamed ParentFilesInCompaction to ParentRangeInCompaction
to adequately represent that it is the range-of-keys and
not individual files that we compact in a single compaction run.
Here is the assertion that is fixed by this patch.
db_test: db/version_set.cc:585: void leveldb::Version::ExtendOverlappingInputs(int, const leveldb::Slice&, const leveldb::Slice&, std::vector<leveldb::FileMetaData*, std::allocator<leveldb::FileMetaData*> >*, int): Assertion `user_cmp->Compare(flimit, user_begin) >= 0' failed.
Test Plan: make clean check OPT=-g
Reviewers: sheki
Reviewed By: sheki
CC: MarkCallaghan, emayanke, leveldb
Differential Revision: https://reviews.facebook.net/D6963
2012-11-26 09:49:50 +00:00
|
|
|
if (file_index) {
|
|
|
|
*file_index = -1;
|
|
|
|
}
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && end != nullptr && level > 0) {
|
2012-11-06 17:06:16 +00:00
|
|
|
GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs,
|
|
|
|
hint_index, file_index);
|
2012-11-05 07:47:06 +00:00
|
|
|
return;
|
|
|
|
}
|
2011-10-31 17:22:06 +00:00
|
|
|
for (size_t i = 0; i < files_[level].size(); ) {
|
|
|
|
FileMetaData* f = files_[level][i++];
|
|
|
|
const Slice file_start = f->smallest.user_key();
|
|
|
|
const Slice file_limit = f->largest.user_key();
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
|
2011-10-05 23:30:28 +00:00
|
|
|
// "f" is completely before specified range; skip it
|
2013-03-01 02:04:58 +00:00
|
|
|
} else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
|
2011-10-05 23:30:28 +00:00
|
|
|
// "f" is completely after specified range; skip it
|
|
|
|
} else {
|
|
|
|
inputs->push_back(f);
|
2011-10-31 17:22:06 +00:00
|
|
|
if (level == 0) {
|
|
|
|
// Level-0 files may overlap each other. So check if the newly
|
|
|
|
// added file has expanded the range. If so, restart search.
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
|
2011-10-31 17:22:06 +00:00
|
|
|
user_begin = file_start;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
2013-03-01 02:04:58 +00:00
|
|
|
} else if (end != nullptr
|
|
|
|
&& user_cmp->Compare(file_limit, user_end) > 0) {
|
2011-10-31 17:22:06 +00:00
|
|
|
user_end = file_limit;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
|
|
|
}
|
2012-11-06 17:06:16 +00:00
|
|
|
} else if (file_index) {
|
Assertion failure while running with unit tests with OPT=-g
Summary:
When we expand the range of keys for a level 0 compaction, we
need to invoke ParentFilesInCompaction() only once for the
entire range of keys that is being compacted. We were invoking
it for each file that was being compacted, but this triggers
an assertion because each file's range were contiguous but
non-overlapping.
I renamed ParentFilesInCompaction to ParentRangeInCompaction
to adequately represent that it is the range-of-keys and
not individual files that we compact in a single compaction run.
Here is the assertion that is fixed by this patch.
db_test: db/version_set.cc:585: void leveldb::Version::ExtendOverlappingInputs(int, const leveldb::Slice&, const leveldb::Slice&, std::vector<leveldb::FileMetaData*, std::allocator<leveldb::FileMetaData*> >*, int): Assertion `user_cmp->Compare(flimit, user_begin) >= 0' failed.
Test Plan: make clean check OPT=-g
Reviewers: sheki
Reviewed By: sheki
CC: MarkCallaghan, emayanke, leveldb
Differential Revision: https://reviews.facebook.net/D6963
2012-11-26 09:49:50 +00:00
|
|
|
*file_index = i-1;
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
2011-10-05 23:30:28 +00:00
|
|
|
}
|
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// Employ binary search to find at least one file that overlaps the
|
|
|
|
// specified range. From that file, iterate backwards and
|
|
|
|
// forwards to find all overlapping files.
|
|
|
|
void Version::GetOverlappingInputsBinarySearch(
|
|
|
|
int level,
|
|
|
|
const Slice& user_begin,
|
|
|
|
const Slice& user_end,
|
2012-11-06 17:06:16 +00:00
|
|
|
std::vector<FileMetaData*>* inputs,
|
|
|
|
int hint_index,
|
|
|
|
int* file_index) {
|
2012-11-05 07:47:06 +00:00
|
|
|
assert(level > 0);
|
|
|
|
int min = 0;
|
|
|
|
int mid = 0;
|
|
|
|
int max = files_[level].size() -1;
|
|
|
|
bool foundOverlap = false;
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2012-11-06 17:06:16 +00:00
|
|
|
|
|
|
|
// if the caller already knows the index of a file that has overlap,
|
|
|
|
// then we can skip the binary search.
|
|
|
|
if (hint_index != -1) {
|
|
|
|
mid = hint_index;
|
|
|
|
foundOverlap = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!foundOverlap && min <= max) {
|
2012-11-05 07:47:06 +00:00
|
|
|
mid = (min + max)/2;
|
|
|
|
FileMetaData* f = files_[level][mid];
|
|
|
|
const Slice file_start = f->smallest.user_key();
|
|
|
|
const Slice file_limit = f->largest.user_key();
|
|
|
|
if (user_cmp->Compare(file_limit, user_begin) < 0) {
|
|
|
|
min = mid + 1;
|
|
|
|
} else if (user_cmp->Compare(user_end, file_start) < 0) {
|
|
|
|
max = mid - 1;
|
|
|
|
} else {
|
|
|
|
foundOverlap = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// If there were no overlapping files, return immediately.
|
|
|
|
if (!foundOverlap) {
|
|
|
|
return;
|
|
|
|
}
|
2012-11-06 17:06:16 +00:00
|
|
|
// returns the index where an overlap is found
|
|
|
|
if (file_index) {
|
|
|
|
*file_index = mid;
|
|
|
|
}
|
2012-11-05 07:47:06 +00:00
|
|
|
ExtendOverlappingInputs(level, user_begin, user_end, inputs, mid);
|
|
|
|
}
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// The midIndex specifies the index of at least one file that
|
|
|
|
// overlaps the specified range. From that file, iterate backward
|
|
|
|
// and forward to find all overlapping files.
|
|
|
|
void Version::ExtendOverlappingInputs(
|
|
|
|
int level,
|
|
|
|
const Slice& user_begin,
|
|
|
|
const Slice& user_end,
|
|
|
|
std::vector<FileMetaData*>* inputs,
|
2013-03-15 01:32:01 +00:00
|
|
|
unsigned int midIndex) {
|
2012-11-05 07:47:06 +00:00
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2012-11-06 17:06:16 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
{
|
|
|
|
// assert that the file at midIndex overlaps with the range
|
|
|
|
assert(midIndex < files_[level].size());
|
|
|
|
FileMetaData* f = files_[level][midIndex];
|
|
|
|
const Slice fstart = f->smallest.user_key();
|
|
|
|
const Slice flimit = f->largest.user_key();
|
|
|
|
if (user_cmp->Compare(fstart, user_begin) >= 0) {
|
|
|
|
assert(user_cmp->Compare(fstart, user_end) <= 0);
|
|
|
|
} else {
|
|
|
|
assert(user_cmp->Compare(flimit, user_begin) >= 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2012-12-31 06:18:52 +00:00
|
|
|
int startIndex = midIndex + 1;
|
|
|
|
int endIndex = midIndex;
|
2013-01-14 20:39:24 +00:00
|
|
|
int count __attribute__((unused)) = 0;
|
2012-11-05 07:47:06 +00:00
|
|
|
|
|
|
|
// check backwards from 'mid' to lower indices
|
2012-12-31 06:18:52 +00:00
|
|
|
for (int i = midIndex; i >= 0 ; i--) {
|
2012-11-05 07:47:06 +00:00
|
|
|
FileMetaData* f = files_[level][i];
|
|
|
|
const Slice file_limit = f->largest.user_key();
|
|
|
|
if (user_cmp->Compare(file_limit, user_begin) >= 0) {
|
2012-12-31 06:18:52 +00:00
|
|
|
startIndex = i;
|
|
|
|
assert((count++, true));
|
2012-11-05 07:47:06 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// check forward from 'mid+1' to higher indices
|
2012-12-31 06:18:52 +00:00
|
|
|
for (unsigned int i = midIndex+1; i < files_[level].size(); i++) {
|
2012-11-05 07:47:06 +00:00
|
|
|
FileMetaData* f = files_[level][i];
|
|
|
|
const Slice file_start = f->smallest.user_key();
|
|
|
|
if (user_cmp->Compare(file_start, user_end) <= 0) {
|
2012-12-31 06:18:52 +00:00
|
|
|
assert((count++, true));
|
|
|
|
endIndex = i;
|
2012-11-05 07:47:06 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-12-31 06:18:52 +00:00
|
|
|
assert(count == endIndex - startIndex + 1);
|
|
|
|
|
|
|
|
// insert overlapping files into vector
|
|
|
|
for (int i = startIndex; i <= endIndex; i++) {
|
|
|
|
FileMetaData* f = files_[level][i];
|
2013-01-08 20:00:13 +00:00
|
|
|
inputs->push_back(f);
|
2012-12-31 06:18:52 +00:00
|
|
|
}
|
2012-11-05 07:47:06 +00:00
|
|
|
}
|
|
|
|
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
// Returns true iff the first or last file in inputs contains
|
|
|
|
// an overlapping user key to the file "just outside" of it (i.e.
|
|
|
|
// just after the last file, or just before the first file)
|
|
|
|
// REQUIRES: "*inputs" is a sorted list of non-overlapping files
|
|
|
|
bool Version::HasOverlappingUserKey(
|
|
|
|
const std::vector<FileMetaData*>* inputs,
|
|
|
|
int level) {
|
|
|
|
|
|
|
|
// If inputs empty, there is no overlap.
|
|
|
|
// If level == 0, it is assumed that all needed files were already included.
|
|
|
|
if (inputs->empty() || level == 0){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
|
|
|
const size_t kNumFiles = files.size();
|
|
|
|
|
|
|
|
// Check the last file in inputs against the file after it
|
2014-01-31 23:30:27 +00:00
|
|
|
size_t last_file = FindFile(cfd_->internal_comparator(), files,
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
inputs->back()->largest.Encode());
|
|
|
|
assert(0 <= last_file && last_file < kNumFiles); // File should exist!
|
|
|
|
if (last_file < kNumFiles-1) { // If not the last file
|
|
|
|
const Slice last_key_in_input = files[last_file]->largest.user_key();
|
|
|
|
const Slice first_key_after = files[last_file+1]->smallest.user_key();
|
|
|
|
if (user_cmp->Compare(last_key_in_input, first_key_after) == 0) {
|
|
|
|
// The last user key in input overlaps with the next file's first key
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the first file in inputs against the file just before it
|
2014-01-31 23:30:27 +00:00
|
|
|
size_t first_file = FindFile(cfd_->internal_comparator(), files,
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
inputs->front()->smallest.Encode());
|
|
|
|
assert(0 <= first_file && first_file <= last_file); // File should exist!
|
|
|
|
if (first_file > 0) { // If not first file
|
|
|
|
const Slice& first_key_in_input = files[first_file]->smallest.user_key();
|
|
|
|
const Slice& last_key_before = files[first_file-1]->largest.user_key();
|
|
|
|
if (user_cmp->Compare(first_key_in_input, last_key_before) == 0) {
|
|
|
|
// The first user key in input overlaps with the previous file's last key
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-01-16 00:18:04 +00:00
|
|
|
int64_t Version::NumLevelBytes(int level) const {
|
|
|
|
assert(level >= 0);
|
|
|
|
assert(level < NumberLevels());
|
|
|
|
return TotalFileSize(files_[level]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Formats per-level file counts into scratch->buffer, e.g. "files[3 2 0]",
// and returns a pointer to the buffer. Output is truncated (loop exits) if
// the buffer fills up.
const char* Version::LevelSummary(LevelSummaryStorage* scratch) const {
  int written = snprintf(scratch->buffer, sizeof(scratch->buffer), "files[");
  for (int level = 0; level < NumberLevels(); level++) {
    const int remaining = sizeof(scratch->buffer) - written;
    const int n = snprintf(scratch->buffer + written, remaining, "%d ",
                           int(files_[level].size()));
    if (n < 0 || n >= remaining) break;
    written += n;
  }
  if (written > 0) {
    // Overwrite the trailing space with the closing bracket.
    --written;
  }
  snprintf(scratch->buffer + written, sizeof(scratch->buffer) - written, "]");
  return scratch->buffer;
}
|
|
|
|
|
|
|
|
// Formats a per-file summary of "level" into scratch->buffer, e.g.
// "files_size[#7(seq=12,sz=4KB,0) ...]", and returns the buffer. Output is
// truncated (loop exits) if the buffer fills up.
const char* Version::LevelFileSummary(FileSummaryStorage* scratch,
                                      int level) const {
  int written =
      snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
  for (const auto& f : files_[level]) {
    const int remaining = sizeof(scratch->buffer) - written;
    char sztxt[16];
    AppendHumanBytes(f->file_size, sztxt, sizeof(sztxt));
    const int n = snprintf(scratch->buffer + written, remaining,
                           "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ", f->number,
                           f->smallest_seqno, sztxt,
                           static_cast<int>(f->being_compacted));
    if (n < 0 || n >= remaining) {
      break;
    }
    written += n;
  }
  // Overwrite the trailing space (only present when the level is non-empty).
  if (files_[level].size() && written > 0) {
    --written;
  }
  snprintf(scratch->buffer + written, sizeof(scratch->buffer) - written, "]");
  return scratch->buffer;
}
|
|
|
|
|
|
|
|
int64_t Version::MaxNextLevelOverlappingBytes() {
|
|
|
|
uint64_t result = 0;
|
|
|
|
std::vector<FileMetaData*> overlaps;
|
|
|
|
for (int level = 1; level < NumberLevels() - 1; level++) {
|
|
|
|
for (const auto& f : files_[level]) {
|
|
|
|
GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps);
|
|
|
|
const uint64_t sum = TotalFileSize(overlaps);
|
|
|
|
if (sum > result) {
|
|
|
|
result = sum;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Version::AddLiveFiles(std::set<uint64_t>* live) {
|
|
|
|
for (int level = 0; level < NumberLevels(); level++) {
|
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
|
|
|
for (const auto& file : files) {
|
|
|
|
live->insert(file->number);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-16 02:28:36 +00:00
|
|
|
std::string Version::DebugString(bool hex) const {
|
2011-03-18 22:37:00 +00:00
|
|
|
std::string r;
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < num_levels_; level++) {
|
2011-06-22 02:36:45 +00:00
|
|
|
// E.g.,
|
|
|
|
// --- level 1 ---
|
|
|
|
// 17:123['a' .. 'd']
|
|
|
|
// 20:43['e' .. 'g']
|
|
|
|
r.append("--- level ");
|
2011-03-18 22:37:00 +00:00
|
|
|
AppendNumberTo(&r, level);
|
2012-10-19 21:00:53 +00:00
|
|
|
r.append(" --- version# ");
|
|
|
|
AppendNumberTo(&r, version_number_);
|
2011-06-22 02:36:45 +00:00
|
|
|
r.append(" ---\n");
|
2011-03-18 22:37:00 +00:00
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
2011-04-20 22:48:11 +00:00
|
|
|
for (size_t i = 0; i < files.size(); i++) {
|
2011-03-18 22:37:00 +00:00
|
|
|
r.push_back(' ');
|
|
|
|
AppendNumberTo(&r, files[i]->number);
|
|
|
|
r.push_back(':');
|
|
|
|
AppendNumberTo(&r, files[i]->file_size);
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append("[");
|
2012-12-16 02:28:36 +00:00
|
|
|
r.append(files[i]->smallest.DebugString(hex));
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append(" .. ");
|
2012-12-16 02:28:36 +00:00
|
|
|
r.append(files[i]->largest.DebugString(hex));
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append("]\n");
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// this is used to batch writes to the manifest file
|
|
|
|
struct VersionSet::ManifestWriter {
|
|
|
|
Status status;
|
|
|
|
bool done;
|
|
|
|
port::CondVar cv;
|
2014-01-31 01:48:42 +00:00
|
|
|
ColumnFamilyData* cfd;
|
2012-10-19 21:00:53 +00:00
|
|
|
VersionEdit* edit;
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2014-01-31 01:48:42 +00:00
|
|
|
explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* cfd,
|
|
|
|
VersionEdit* e)
|
|
|
|
: done(false), cv(mu), cfd(cfd), edit(e) {}
|
2012-10-19 21:00:53 +00:00
|
|
|
};
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// A helper class so we can efficiently apply a whole sequence
|
|
|
|
// of edits to a particular state without creating intermediate
|
|
|
|
// Versions that contain full copies of the intermediate state.
|
|
|
|
class VersionSet::Builder {
|
|
|
|
private:
|
2014-03-26 20:30:14 +00:00
|
|
|
// Helper to sort v->files_
|
|
|
|
// kLevel0LevelCompaction -- NewestFirst
|
|
|
|
// kLevel0UniversalCompaction -- NewestFirstBySeqNo
|
|
|
|
// kLevelNon0 -- BySmallestKey
|
|
|
|
struct FileComparator {
|
|
|
|
enum SortMethod {
|
|
|
|
kLevel0LevelCompaction = 0,
|
|
|
|
kLevel0UniversalCompaction = 1,
|
|
|
|
kLevelNon0 = 2,
|
|
|
|
} sort_method;
|
2011-05-21 02:17:43 +00:00
|
|
|
const InternalKeyComparator* internal_comparator;
|
|
|
|
|
|
|
|
bool operator()(FileMetaData* f1, FileMetaData* f2) const {
|
2014-03-26 20:30:14 +00:00
|
|
|
switch (sort_method) {
|
|
|
|
case kLevel0LevelCompaction:
|
|
|
|
return NewestFirst(f1, f2);
|
|
|
|
case kLevel0UniversalCompaction:
|
|
|
|
return NewestFirstBySeqNo(f1, f2);
|
|
|
|
case kLevelNon0:
|
|
|
|
return BySmallestKey(f1, f2, internal_comparator);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2014-03-26 20:30:14 +00:00
|
|
|
assert(false);
|
2014-03-26 21:46:07 +00:00
|
|
|
return false;
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-03-26 20:30:14 +00:00
|
|
|
typedef std::set<FileMetaData*, FileComparator> FileSet;
|
2011-05-21 02:17:43 +00:00
|
|
|
struct LevelState {
|
|
|
|
std::set<uint64_t> deleted_files;
|
|
|
|
FileSet* added_files;
|
|
|
|
};
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
ColumnFamilyData* cfd_;
|
2011-05-21 02:17:43 +00:00
|
|
|
Version* base_;
|
2012-06-23 02:30:03 +00:00
|
|
|
LevelState* levels_;
|
2014-03-26 20:30:14 +00:00
|
|
|
FileComparator level_zero_cmp_;
|
|
|
|
FileComparator level_nonzero_cmp_;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
public:
|
2014-02-28 19:25:38 +00:00
|
|
|
Builder(ColumnFamilyData* cfd) : cfd_(cfd), base_(cfd->current()) {
|
2011-05-21 02:17:43 +00:00
|
|
|
base_->Ref();
|
2014-02-28 19:25:38 +00:00
|
|
|
levels_ = new LevelState[base_->NumberLevels()];
|
2014-03-26 20:30:14 +00:00
|
|
|
level_zero_cmp_.sort_method =
|
2014-03-31 19:44:54 +00:00
|
|
|
(cfd_->options()->compaction_style == kCompactionStyleUniversal)
|
2014-03-26 20:30:14 +00:00
|
|
|
? FileComparator::kLevel0UniversalCompaction
|
|
|
|
: FileComparator::kLevel0LevelCompaction;
|
|
|
|
level_nonzero_cmp_.sort_method = FileComparator::kLevelNon0;
|
2014-03-31 19:44:54 +00:00
|
|
|
level_nonzero_cmp_.internal_comparator = &cfd->internal_comparator();
|
2014-03-26 20:30:14 +00:00
|
|
|
|
|
|
|
levels_[0].added_files = new FileSet(level_zero_cmp_);
|
2014-03-31 19:44:54 +00:00
|
|
|
for (int level = 1; level < base_->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
levels_[level].added_files = new FileSet(level_nonzero_cmp_);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
~Builder() {
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < base_->NumberLevels(); level++) {
|
2011-07-19 23:36:47 +00:00
|
|
|
const FileSet* added = levels_[level].added_files;
|
|
|
|
std::vector<FileMetaData*> to_unref;
|
|
|
|
to_unref.reserve(added->size());
|
|
|
|
for (FileSet::const_iterator it = added->begin();
|
|
|
|
it != added->end(); ++it) {
|
|
|
|
to_unref.push_back(*it);
|
|
|
|
}
|
|
|
|
delete added;
|
2011-08-06 00:19:37 +00:00
|
|
|
for (uint32_t i = 0; i < to_unref.size(); i++) {
|
2011-05-21 02:17:43 +00:00
|
|
|
FileMetaData* f = to_unref[i];
|
2011-03-18 22:37:00 +00:00
|
|
|
f->refs--;
|
|
|
|
if (f->refs <= 0) {
|
2014-01-07 04:29:17 +00:00
|
|
|
if (f->table_reader_handle) {
|
2014-02-06 23:42:16 +00:00
|
|
|
cfd_->table_cache()->ReleaseHandle(f->table_reader_handle);
|
2014-01-07 04:29:17 +00:00
|
|
|
f->table_reader_handle = nullptr;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
delete f;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-12-31 02:33:57 +00:00
|
|
|
|
2012-06-23 02:30:03 +00:00
|
|
|
delete[] levels_;
|
2011-05-21 02:17:43 +00:00
|
|
|
base_->Unref();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
void CheckConsistency(Version* v) {
|
|
|
|
#ifndef NDEBUG
|
2014-03-26 20:30:14 +00:00
|
|
|
// make sure the files are sorted correctly
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
for (size_t i = 1; i < v->files_[level].size(); i++) {
|
|
|
|
auto f1 = v->files_[level][i - 1];
|
|
|
|
auto f2 = v->files_[level][i];
|
|
|
|
if (level == 0) {
|
|
|
|
assert(level_zero_cmp_(f1, f2));
|
2014-03-31 19:44:54 +00:00
|
|
|
if (cfd_->options()->compaction_style == kCompactionStyleUniversal) {
|
2014-03-26 20:30:14 +00:00
|
|
|
assert(f1->largest_seqno > f2->largest_seqno);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(level_nonzero_cmp_(f1, f2));
|
|
|
|
|
|
|
|
// Make sure there is no overlap in levels > 0
|
2014-03-31 19:44:54 +00:00
|
|
|
if (cfd_->internal_comparator().Compare(f1->largest, f2->smallest) >=
|
|
|
|
0) {
|
2012-10-19 21:00:53 +00:00
|
|
|
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
|
2014-03-26 20:30:14 +00:00
|
|
|
(f1->largest).DebugString().c_str(),
|
|
|
|
(f2->smallest).DebugString().c_str());
|
2012-10-19 21:00:53 +00:00
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2014-01-14 23:27:09 +00:00
|
|
|
void CheckConsistencyForDeletes(VersionEdit* edit, unsigned int number,
|
|
|
|
int level) {
|
2012-11-13 18:30:00 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// a file to be deleted better exist in the previous version
|
|
|
|
bool found = false;
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int l = 0; !found && l < base_->NumberLevels(); l++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
const std::vector<FileMetaData*>& base_files = base_->files_[l];
|
2013-03-15 01:32:01 +00:00
|
|
|
for (unsigned int i = 0; i < base_files.size(); i++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
FileMetaData* f = base_files[i];
|
|
|
|
if (f->number == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// if the file did not exist in the previous version, then it
|
|
|
|
// is possibly moved from lower level to higher level in current
|
|
|
|
// version
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int l = level+1; !found && l < base_->NumberLevels(); l++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
const FileSet* added = levels_[l].added_files;
|
2012-11-19 22:51:22 +00:00
|
|
|
for (FileSet::const_iterator added_iter = added->begin();
|
|
|
|
added_iter != added->end(); ++added_iter) {
|
|
|
|
FileMetaData* f = *added_iter;
|
|
|
|
if (f->number == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// maybe this file was added in a previous edit that was Applied
|
|
|
|
if (!found) {
|
|
|
|
const FileSet* added = levels_[level].added_files;
|
2012-11-13 18:30:00 +00:00
|
|
|
for (FileSet::const_iterator added_iter = added->begin();
|
|
|
|
added_iter != added->end(); ++added_iter) {
|
|
|
|
FileMetaData* f = *added_iter;
|
|
|
|
if (f->number == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(found);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Apply all of the edits in *edit to the current state.
|
|
|
|
void Apply(VersionEdit* edit) {
|
2012-10-19 21:00:53 +00:00
|
|
|
CheckConsistency(base_);
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Delete files
|
|
|
|
const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
|
2013-12-31 02:33:57 +00:00
|
|
|
for (const auto& del_file : del) {
|
|
|
|
const auto level = del_file.first;
|
|
|
|
const auto number = del_file.second;
|
2011-05-21 02:17:43 +00:00
|
|
|
levels_[level].deleted_files.insert(number);
|
2012-11-13 18:30:00 +00:00
|
|
|
CheckConsistencyForDeletes(edit, number, level);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add new files
|
2013-12-31 02:33:57 +00:00
|
|
|
for (const auto& new_file : edit->new_files_) {
|
|
|
|
const int level = new_file.first;
|
|
|
|
FileMetaData* f = new FileMetaData(new_file.second);
|
2011-03-18 22:37:00 +00:00
|
|
|
f->refs = 1;
|
2011-06-22 02:36:45 +00:00
|
|
|
|
|
|
|
// We arrange to automatically compact this file after
|
|
|
|
// a certain number of seeks. Let's assume:
|
|
|
|
// (1) One seek costs 10ms
|
|
|
|
// (2) Writing or reading 1MB costs 10ms (100MB/s)
|
|
|
|
// (3) A compaction of 1MB does 25MB of IO:
|
|
|
|
// 1MB read from this level
|
|
|
|
// 10-12MB read from next level (boundaries may be misaligned)
|
|
|
|
// 10-12MB written to next level
|
|
|
|
// This implies that 25 seeks cost the same as the compaction
|
|
|
|
// of 1MB of data. I.e., one seek costs approximately the
|
|
|
|
// same as the compaction of 40KB of data. We are a little
|
|
|
|
// conservative and allow approximately one seek for every 16KB
|
|
|
|
// of data before triggering a compaction.
|
|
|
|
f->allowed_seeks = (f->file_size / 16384);
|
|
|
|
if (f->allowed_seeks < 100) f->allowed_seeks = 100;
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
levels_[level].deleted_files.erase(f->number);
|
|
|
|
levels_[level].added_files->insert(f);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save the current state in *v.
|
|
|
|
void SaveTo(Version* v) {
|
2012-10-19 21:00:53 +00:00
|
|
|
CheckConsistency(base_);
|
|
|
|
CheckConsistency(v);
|
2014-03-31 19:44:54 +00:00
|
|
|
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < base_->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
const auto& cmp = (level == 0) ? level_zero_cmp_ : level_nonzero_cmp_;
|
2011-05-21 02:17:43 +00:00
|
|
|
// Merge the set of added files with the set of pre-existing files.
|
|
|
|
// Drop any deleted files. Store the result in *v.
|
2013-12-31 02:33:57 +00:00
|
|
|
const auto& base_files = base_->files_[level];
|
|
|
|
auto base_iter = base_files.begin();
|
|
|
|
auto base_end = base_files.end();
|
|
|
|
const auto& added_files = *levels_[level].added_files;
|
|
|
|
v->files_[level].reserve(base_files.size() + added_files.size());
|
|
|
|
|
|
|
|
for (const auto& added : added_files) {
|
2011-05-21 02:17:43 +00:00
|
|
|
// Add all smaller files listed in base_
|
2013-12-31 02:33:57 +00:00
|
|
|
for (auto bpos = std::upper_bound(base_iter, base_end, added, cmp);
|
2011-05-21 02:17:43 +00:00
|
|
|
base_iter != bpos;
|
|
|
|
++base_iter) {
|
|
|
|
MaybeAddFile(v, level, *base_iter);
|
|
|
|
}
|
|
|
|
|
2013-12-31 02:33:57 +00:00
|
|
|
MaybeAddFile(v, level, added);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add remaining base files
|
|
|
|
for (; base_iter != base_end; ++base_iter) {
|
|
|
|
MaybeAddFile(v, level, *base_iter);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2013-12-09 22:28:26 +00:00
|
|
|
|
2012-10-26 01:21:54 +00:00
|
|
|
CheckConsistency(v);
|
hints for narrowing down FindFile range and avoiding checking unrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparision that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending one the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some inital results: measured with 500M key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
|
|
|
|
v->file_indexer_.UpdateIndex(v->files_);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
2014-01-07 04:29:17 +00:00
|
|
|
void LoadTableHandlers() {
|
2014-02-06 23:42:16 +00:00
|
|
|
for (int level = 0; level < cfd_->NumberLevels(); level++) {
|
2014-01-07 04:29:17 +00:00
|
|
|
for (auto& file_meta : *(levels_[level].added_files)) {
|
|
|
|
assert (!file_meta->table_reader_handle);
|
|
|
|
bool table_io;
|
2014-02-06 23:42:16 +00:00
|
|
|
cfd_->table_cache()->FindTable(
|
|
|
|
base_->vset_->storage_options_, cfd_->internal_comparator(),
|
|
|
|
file_meta->number, file_meta->file_size,
|
|
|
|
&file_meta->table_reader_handle, &table_io, false);
|
2014-04-17 21:07:05 +00:00
|
|
|
if (file_meta->table_reader_handle != nullptr) {
|
|
|
|
// Load table_reader
|
|
|
|
file_meta->table_reader =
|
|
|
|
cfd_->table_cache()->GetTableReaderFromHandle(
|
|
|
|
file_meta->table_reader_handle);
|
|
|
|
}
|
2014-01-07 04:29:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
void MaybeAddFile(Version* v, int level, FileMetaData* f) {
|
|
|
|
if (levels_[level].deleted_files.count(f->number) > 0) {
|
|
|
|
// File is deleted: do nothing
|
|
|
|
} else {
|
2013-12-31 02:33:57 +00:00
|
|
|
auto* files = &v->files_[level];
|
2011-06-22 02:36:45 +00:00
|
|
|
if (level > 0 && !files->empty()) {
|
|
|
|
// Must not overlap
|
2014-01-31 23:30:27 +00:00
|
|
|
assert(cfd_->internal_comparator().Compare(
|
|
|
|
(*files)[files->size() - 1]->largest, f->smallest) < 0);
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
2011-05-21 02:17:43 +00:00
|
|
|
f->refs++;
|
2011-06-22 02:36:45 +00:00
|
|
|
files->push_back(f);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-02-05 21:12:23 +00:00
|
|
|
VersionSet::VersionSet(const std::string& dbname, const DBOptions* options,
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
|
|
|
const EnvOptions& storage_options, Cache* table_cache)
|
|
|
|
: column_family_set_(new ColumnFamilySet(dbname, options, storage_options,
|
|
|
|
table_cache)),
|
2014-01-22 19:44:53 +00:00
|
|
|
env_(options->env),
|
2011-03-18 22:37:00 +00:00
|
|
|
dbname_(dbname),
|
|
|
|
options_(options),
|
|
|
|
next_file_number_(2),
|
|
|
|
manifest_file_number_(0), // Filled by Recover()
|
2014-03-18 04:50:15 +00:00
|
|
|
pending_manifest_file_number_(0),
|
2011-04-12 19:38:58 +00:00
|
|
|
last_sequence_(0),
|
|
|
|
prev_log_number_(0),
|
2013-01-11 01:18:50 +00:00
|
|
|
current_version_number_(0),
|
2014-01-10 23:12:34 +00:00
|
|
|
manifest_file_size_(0),
|
2013-03-15 00:00:04 +00:00
|
|
|
storage_options_(storage_options),
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
|
|
|
storage_options_compactions_(storage_options_) {}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
VersionSet::~VersionSet() {
|
2014-01-24 22:30:28 +00:00
|
|
|
// we need to delete column_family_set_ because its destructor depends on
|
|
|
|
// VersionSet
|
|
|
|
column_family_set_.reset();
|
2013-11-12 19:53:26 +00:00
|
|
|
for (auto file : obsolete_files_) {
|
|
|
|
delete file;
|
|
|
|
}
|
|
|
|
obsolete_files_.clear();
|
2012-10-31 18:47:18 +00:00
|
|
|
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
void VersionSet::AppendVersion(ColumnFamilyData* column_family_data,
|
|
|
|
Version* v) {
|
2011-05-21 02:17:43 +00:00
|
|
|
// Make "v" current
|
|
|
|
assert(v->refs_ == 0);
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* current = column_family_data->current();
|
|
|
|
assert(v != current);
|
|
|
|
if (current != nullptr) {
|
|
|
|
assert(current->refs_ > 0);
|
|
|
|
current->Unref();
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2014-01-29 21:28:50 +00:00
|
|
|
column_family_data->SetCurrent(v);
|
2011-05-21 02:17:43 +00:00
|
|
|
v->Ref();
|
|
|
|
|
|
|
|
// Append to linked list
|
2014-01-29 21:28:50 +00:00
|
|
|
v->prev_ = column_family_data->dummy_versions()->prev_;
|
|
|
|
v->next_ = column_family_data->dummy_versions();
|
2011-05-21 02:17:43 +00:00
|
|
|
v->prev_->next_ = v;
|
|
|
|
v->next_->prev_ = v;
|
|
|
|
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
|
2014-01-27 19:11:51 +00:00
|
|
|
VersionEdit* edit, port::Mutex* mu,
|
2014-02-28 22:05:11 +00:00
|
|
|
Directory* db_directory, bool new_descriptor_log,
|
|
|
|
const ColumnFamilyOptions* options) {
|
2012-10-19 21:00:53 +00:00
|
|
|
mu->AssertHeld();
|
2011-04-12 19:38:58 +00:00
|
|
|
|
2014-03-13 01:09:03 +00:00
|
|
|
// column_family_data can be nullptr only if this is column_family_add.
|
|
|
|
// in that case, we also need to specify ColumnFamilyOptions
|
|
|
|
if (column_family_data == nullptr) {
|
|
|
|
assert(edit->is_column_family_add_);
|
|
|
|
assert(options != nullptr);
|
2014-02-28 22:05:11 +00:00
|
|
|
}
|
2014-02-11 01:04:44 +00:00
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// queue our request
|
2014-01-31 01:48:42 +00:00
|
|
|
ManifestWriter w(mu, column_family_data, edit);
|
2012-10-19 21:00:53 +00:00
|
|
|
manifest_writers_.push_back(&w);
|
|
|
|
while (!w.done && &w != manifest_writers_.front()) {
|
|
|
|
w.cv.Wait();
|
2011-04-12 19:38:58 +00:00
|
|
|
}
|
2012-10-19 21:00:53 +00:00
|
|
|
if (w.done) {
|
|
|
|
return w.status;
|
|
|
|
}
|
2014-03-11 21:52:17 +00:00
|
|
|
if (column_family_data != nullptr && column_family_data->IsDropped()) {
|
|
|
|
// if column family is dropped by the time we get here, no need to write
|
|
|
|
// anything to the manifest
|
|
|
|
manifest_writers_.pop_front();
|
|
|
|
// Notify new head of write queue
|
|
|
|
if (!manifest_writers_.empty()) {
|
|
|
|
manifest_writers_.front()->cv.Signal();
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
std::vector<VersionEdit*> batch_edits;
|
2014-02-28 22:05:11 +00:00
|
|
|
Version* v = nullptr;
|
|
|
|
std::unique_ptr<Builder> builder(nullptr);
|
2011-04-12 19:38:58 +00:00
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// process all requests in the queue
|
|
|
|
ManifestWriter* last_writer = &w;
|
|
|
|
assert(!manifest_writers_.empty());
|
2012-11-07 23:11:37 +00:00
|
|
|
assert(manifest_writers_.front() == &w);
|
2014-02-28 22:05:11 +00:00
|
|
|
if (edit->IsColumnFamilyManipulation()) {
|
|
|
|
// no group commits for column family add or drop
|
2014-03-13 01:09:03 +00:00
|
|
|
LogAndApplyCFHelper(edit);
|
2014-02-28 22:05:11 +00:00
|
|
|
batch_edits.push_back(edit);
|
|
|
|
} else {
|
|
|
|
v = new Version(column_family_data, this, current_version_number_++);
|
|
|
|
builder.reset(new Builder(column_family_data));
|
|
|
|
for (const auto& writer : manifest_writers_) {
|
|
|
|
if (writer->edit->IsColumnFamilyManipulation() ||
|
|
|
|
writer->cfd->GetID() != column_family_data->GetID()) {
|
|
|
|
// no group commits for column family add or drop
|
|
|
|
// also, group commits across column families are not supported
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
last_writer = writer;
|
|
|
|
LogAndApplyHelper(column_family_data, builder.get(), v, last_writer->edit,
|
|
|
|
mu);
|
|
|
|
batch_edits.push_back(last_writer->edit);
|
2014-01-31 01:48:42 +00:00
|
|
|
}
|
2014-02-28 22:05:11 +00:00
|
|
|
builder->SaveTo(v);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize new descriptor log file if necessary by creating
|
|
|
|
// a temporary file that contains a snapshot of the current version.
|
2012-09-24 21:01:01 +00:00
|
|
|
uint64_t new_manifest_file_size = 0;
|
2011-05-21 02:17:43 +00:00
|
|
|
Status s;
|
2013-01-11 01:18:50 +00:00
|
|
|
|
2014-03-18 04:50:15 +00:00
|
|
|
assert(pending_manifest_file_number_ == 0);
|
2013-02-19 04:08:12 +00:00
|
|
|
if (!descriptor_log_ ||
|
2014-01-10 23:12:34 +00:00
|
|
|
manifest_file_size_ > options_->max_manifest_file_size) {
|
2014-03-18 04:50:15 +00:00
|
|
|
pending_manifest_file_number_ = NewFileNumber();
|
|
|
|
batch_edits.back()->SetNextFile(next_file_number_);
|
2013-01-11 01:18:50 +00:00
|
|
|
new_descriptor_log = true;
|
2014-03-18 04:50:15 +00:00
|
|
|
} else {
|
|
|
|
pending_manifest_file_number_ = manifest_file_number_;
|
2013-01-11 01:18:50 +00:00
|
|
|
}
|
|
|
|
|
2013-11-08 23:23:46 +00:00
|
|
|
if (new_descriptor_log) {
|
2014-03-18 20:24:27 +00:00
|
|
|
// if we're writing out new snapshot make sure to persist max column family
|
2014-03-13 01:09:03 +00:00
|
|
|
if (column_family_set_->GetMaxColumnFamily() > 0) {
|
|
|
|
edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-07 04:29:17 +00:00
|
|
|
// Unlock during expensive operations. New writes cannot get here
|
2012-10-19 21:00:53 +00:00
|
|
|
// because &w is ensuring that all new writes get queued.
|
2011-09-01 19:08:02 +00:00
|
|
|
{
|
2014-02-28 22:05:11 +00:00
|
|
|
std::vector<uint64_t> size_being_compacted;
|
|
|
|
if (!edit->IsColumnFamilyManipulation()) {
|
|
|
|
size_being_compacted.resize(v->NumberLevels() - 1);
|
|
|
|
// calculate the amount of data being compacted at every level
|
|
|
|
column_family_data->compaction_picker()->SizeBeingCompacted(
|
|
|
|
size_being_compacted);
|
|
|
|
}
|
Prevent segfault because SizeUnderCompaction was called without any locks.
Summary:
SizeBeingCompacted was called without any lock protection. This causes
crashes, especially when running db_bench with value_size=128K.
The fix is to compute SizeUnderCompaction while holding the mutex and
passing in these values into the call to Finalize.
(gdb) where
#4 leveldb::VersionSet::SizeBeingCompacted (this=this@entry=0x7f0b490931c0, level=level@entry=4) at db/version_set.cc:1827
#5 0x000000000043a3c8 in leveldb::VersionSet::Finalize (this=this@entry=0x7f0b490931c0, v=v@entry=0x7f0b3b86b480) at db/version_set.cc:1420
#6 0x00000000004418d1 in leveldb::VersionSet::LogAndApply (this=0x7f0b490931c0, edit=0x7f0b3dc8c200, mu=0x7f0b490835b0, new_descriptor_log=<optimized out>) at db/version_set.cc:1016
#7 0x00000000004222b2 in leveldb::DBImpl::InstallCompactionResults (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1473
#8 0x0000000000426027 in leveldb::DBImpl::DoCompactionWork (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1757
#9 0x0000000000426690 in leveldb::DBImpl::BackgroundCompaction (this=this@entry=0x7f0b49083400, madeProgress=madeProgress@entry=0x7f0b41bf2d1e, deletion_state=...) at db/db_impl.cc:1268
#10 0x0000000000428f42 in leveldb::DBImpl::BackgroundCall (this=0x7f0b49083400) at db/db_impl.cc:1170
#11 0x000000000045348e in BGThread (this=0x7f0b49023100) at util/env_posix.cc:941
#12 leveldb::(anonymous namespace)::PosixEnv::BGThreadWrapper (arg=0x7f0b49023100) at util/env_posix.cc:874
#13 0x00007f0b4a7cf10d in start_thread (arg=0x7f0b41bf3700) at pthread_create.c:301
#14 0x00007f0b49b4b11d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
Test Plan:
make check
I am running db_bench with a value size of 128K to see if the segfault is fixed.
Reviewers: MarkCallaghan, sheki, emayanke
Reviewed By: sheki
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9279
2013-03-11 16:47:48 +00:00
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
mu->Unlock();
|
2012-11-01 05:01:57 +00:00
|
|
|
|
2014-02-28 22:05:11 +00:00
|
|
|
if (!edit->IsColumnFamilyManipulation() && options_->max_open_files == -1) {
|
2014-01-07 04:29:17 +00:00
|
|
|
// unlimited table cache. Pre-load table handle now.
|
|
|
|
// Need to do it out of the mutex.
|
2014-02-28 22:05:11 +00:00
|
|
|
builder->LoadTableHandlers();
|
2014-01-07 04:29:17 +00:00
|
|
|
}
|
|
|
|
|
2013-11-01 19:32:27 +00:00
|
|
|
// This is fine because everything inside of this block is serialized --
|
|
|
|
// only one thread can be here at the same time
|
2014-03-13 01:09:03 +00:00
|
|
|
if (new_descriptor_log) {
|
2013-11-01 19:32:27 +00:00
|
|
|
unique_ptr<WritableFile> descriptor_file;
|
2014-03-18 04:50:15 +00:00
|
|
|
s = env_->NewWritableFile(
|
|
|
|
DescriptorFileName(dbname_, pending_manifest_file_number_),
|
2014-03-18 04:52:14 +00:00
|
|
|
&descriptor_file, env_->OptimizeForManifestWrite(storage_options_));
|
2013-11-01 19:32:27 +00:00
|
|
|
if (s.ok()) {
|
2014-03-26 16:37:53 +00:00
|
|
|
descriptor_file->SetPreallocationBlockSize(
|
|
|
|
options_->manifest_preallocation_size);
|
2013-11-01 19:32:27 +00:00
|
|
|
descriptor_log_.reset(new log::Writer(std::move(descriptor_file)));
|
|
|
|
s = WriteSnapshot(descriptor_log_.get());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-28 22:05:11 +00:00
|
|
|
if (!edit->IsColumnFamilyManipulation()) {
|
2014-03-20 00:22:20 +00:00
|
|
|
// The calls to ComputeCompactionScore and UpdateFilesBySize are cpu-heavy
|
2014-02-28 22:05:11 +00:00
|
|
|
// and is best called outside the mutex.
|
2014-03-20 00:22:20 +00:00
|
|
|
v->ComputeCompactionScore(size_being_compacted);
|
2014-02-28 22:05:11 +00:00
|
|
|
v->UpdateFilesBySize();
|
|
|
|
}
|
2011-09-01 19:08:02 +00:00
|
|
|
|
|
|
|
// Write new record to MANIFEST log
|
2011-03-18 22:37:00 +00:00
|
|
|
if (s.ok()) {
|
2014-02-28 20:22:45 +00:00
|
|
|
for (auto& e : batch_edits) {
|
|
|
|
std::string record;
|
|
|
|
e->EncodeTo(&record);
|
2012-10-19 21:00:53 +00:00
|
|
|
s = descriptor_log_->AddRecord(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-09-01 19:08:02 +00:00
|
|
|
if (s.ok()) {
|
2012-08-27 19:10:26 +00:00
|
|
|
if (options_->use_fsync) {
|
2013-11-22 22:14:05 +00:00
|
|
|
StopWatch sw(env_, options_->statistics.get(),
|
|
|
|
MANIFEST_FILE_SYNC_MICROS);
|
2013-01-20 10:07:13 +00:00
|
|
|
s = descriptor_log_->file()->Fsync();
|
2012-08-27 19:10:26 +00:00
|
|
|
} else {
|
2013-11-22 22:14:05 +00:00
|
|
|
StopWatch sw(env_, options_->statistics.get(),
|
|
|
|
MANIFEST_FILE_SYNC_MICROS);
|
2013-01-20 10:07:13 +00:00
|
|
|
s = descriptor_log_->file()->Sync();
|
2012-08-27 19:10:26 +00:00
|
|
|
}
|
2011-09-01 19:08:02 +00:00
|
|
|
}
|
2013-01-08 20:00:13 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str());
|
2014-02-28 20:22:45 +00:00
|
|
|
bool all_records_in = true;
|
|
|
|
for (auto& e : batch_edits) {
|
|
|
|
std::string record;
|
|
|
|
e->EncodeTo(&record);
|
2014-03-18 04:50:15 +00:00
|
|
|
if (!ManifestContains(pending_manifest_file_number_, record)) {
|
2014-02-28 20:22:45 +00:00
|
|
|
all_records_in = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (all_records_in) {
|
2013-01-08 20:00:13 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"MANIFEST contains log record despite error; advancing to new "
|
2013-03-06 21:28:54 +00:00
|
|
|
"version to prevent mismatch between in-memory and logged state"
|
|
|
|
" If paranoid is set, then the db is now in readonly mode.");
|
2013-01-08 20:00:13 +00:00
|
|
|
s = Status::OK();
|
|
|
|
}
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
// If we just created a new descriptor file, install it by writing a
|
|
|
|
// new CURRENT file that points to it.
|
2014-03-18 04:50:15 +00:00
|
|
|
if (s.ok() && new_descriptor_log) {
|
2014-05-06 21:51:33 +00:00
|
|
|
s = SetCurrentFile(env_, dbname_, pending_manifest_file_number_,
|
|
|
|
db_directory);
|
2014-03-18 04:50:15 +00:00
|
|
|
if (s.ok() && pending_manifest_file_number_ > manifest_file_number_) {
|
2013-11-08 23:23:46 +00:00
|
|
|
// delete old manifest file
|
|
|
|
Log(options_->info_log,
|
2014-03-18 04:50:15 +00:00
|
|
|
"Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
|
|
|
|
manifest_file_number_, pending_manifest_file_number_);
|
2013-11-08 23:23:46 +00:00
|
|
|
// we don't care about an error here, PurgeObsoleteFiles will take care
|
|
|
|
// of it later
|
2014-03-18 04:50:15 +00:00
|
|
|
env_->DeleteFile(DescriptorFileName(dbname_, manifest_file_number_));
|
2013-11-08 23:23:46 +00:00
|
|
|
}
|
2011-09-01 19:08:02 +00:00
|
|
|
}
|
|
|
|
|
2014-01-29 00:02:51 +00:00
|
|
|
if (s.ok()) {
|
|
|
|
// find offset in manifest file where this version is stored.
|
|
|
|
new_manifest_file_size = descriptor_log_->file()->GetFileSize();
|
|
|
|
}
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2013-11-07 19:31:56 +00:00
|
|
|
LogFlush(options_->info_log);
|
2011-09-01 19:08:02 +00:00
|
|
|
mu->Lock();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Install the new version
|
|
|
|
if (s.ok()) {
|
2014-02-28 22:05:11 +00:00
|
|
|
if (edit->is_column_family_add_) {
|
|
|
|
// no group commit on column family add
|
|
|
|
assert(batch_edits.size() == 1);
|
|
|
|
assert(options != nullptr);
|
|
|
|
CreateColumnFamily(*options, edit);
|
|
|
|
} else if (edit->is_column_family_drop_) {
|
|
|
|
assert(batch_edits.size() == 1);
|
2014-03-11 03:22:31 +00:00
|
|
|
column_family_data->SetDropped();
|
2014-02-28 22:05:11 +00:00
|
|
|
if (column_family_data->Unref()) {
|
|
|
|
delete column_family_data;
|
|
|
|
}
|
|
|
|
} else {
|
2014-03-14 20:11:41 +00:00
|
|
|
uint64_t max_log_number_in_batch = 0;
|
|
|
|
for (auto& e : batch_edits) {
|
|
|
|
if (e->has_log_number_) {
|
|
|
|
max_log_number_in_batch =
|
|
|
|
std::max(max_log_number_in_batch, e->log_number_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (max_log_number_in_batch != 0) {
|
2014-04-15 16:57:25 +00:00
|
|
|
assert(column_family_data->GetLogNumber() <= max_log_number_in_batch);
|
2014-03-14 20:11:41 +00:00
|
|
|
column_family_data->SetLogNumber(max_log_number_in_batch);
|
|
|
|
}
|
2014-02-28 22:05:11 +00:00
|
|
|
AppendVersion(column_family_data, v);
|
|
|
|
}
|
|
|
|
|
2014-03-18 04:50:15 +00:00
|
|
|
manifest_file_number_ = pending_manifest_file_number_;
|
2014-01-10 23:12:34 +00:00
|
|
|
manifest_file_size_ = new_manifest_file_size;
|
2011-04-12 19:38:58 +00:00
|
|
|
prev_log_number_ = edit->prev_log_number_;
|
2011-03-18 22:37:00 +00:00
|
|
|
} else {
|
2014-04-25 13:51:16 +00:00
|
|
|
Log(options_->info_log, "Error in committing version %lu to [%s]",
|
|
|
|
(unsigned long)v->GetVersionNumber(),
|
|
|
|
column_family_data->GetName().c_str());
|
2011-03-18 22:37:00 +00:00
|
|
|
delete v;
|
2014-03-18 04:50:15 +00:00
|
|
|
if (new_descriptor_log) {
|
2013-01-20 10:07:13 +00:00
|
|
|
descriptor_log_.reset();
|
2014-03-18 04:50:15 +00:00
|
|
|
env_->DeleteFile(
|
|
|
|
DescriptorFileName(dbname_, pending_manifest_file_number_));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
2014-03-18 04:50:15 +00:00
|
|
|
pending_manifest_file_number_ = 0;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// wake up all the waiting writers
|
|
|
|
while (true) {
|
|
|
|
ManifestWriter* ready = manifest_writers_.front();
|
|
|
|
manifest_writers_.pop_front();
|
|
|
|
if (ready != &w) {
|
|
|
|
ready->status = s;
|
|
|
|
ready->done = true;
|
|
|
|
ready->cv.Signal();
|
|
|
|
}
|
|
|
|
if (ready == last_writer) break;
|
|
|
|
}
|
|
|
|
// Notify new head of write queue
|
|
|
|
if (!manifest_writers_.empty()) {
|
|
|
|
manifest_writers_.front()->cv.Signal();
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2014-03-13 01:09:03 +00:00
|
|
|
void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
|
|
|
|
assert(edit->IsColumnFamilyManipulation());
|
|
|
|
edit->SetNextFile(next_file_number_);
|
|
|
|
edit->SetLastSequence(last_sequence_);
|
|
|
|
if (edit->is_column_family_drop_) {
|
|
|
|
// if we drop column family, we have to make sure to save max column family,
|
|
|
|
// so that we don't reuse existing ID
|
|
|
|
edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-31 01:48:42 +00:00
|
|
|
void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd, Builder* builder,
|
|
|
|
Version* v, VersionEdit* edit,
|
|
|
|
port::Mutex* mu) {
|
2012-10-19 21:00:53 +00:00
|
|
|
mu->AssertHeld();
|
2014-03-13 01:09:03 +00:00
|
|
|
assert(!edit->IsColumnFamilyManipulation());
|
2012-10-19 21:00:53 +00:00
|
|
|
|
2014-02-28 22:05:11 +00:00
|
|
|
if (edit->has_log_number_) {
|
|
|
|
assert(edit->log_number_ >= cfd->GetLogNumber());
|
2014-03-14 20:11:41 +00:00
|
|
|
assert(edit->log_number_ < next_file_number_);
|
2014-02-28 18:29:37 +00:00
|
|
|
}
|
2014-02-28 22:05:11 +00:00
|
|
|
|
2014-03-13 01:09:03 +00:00
|
|
|
if (!edit->has_prev_log_number_) {
|
|
|
|
edit->SetPrevLogNumber(prev_log_number_);
|
|
|
|
}
|
|
|
|
edit->SetNextFile(next_file_number_);
|
|
|
|
edit->SetLastSequence(last_sequence_);
|
|
|
|
|
2014-02-28 22:05:11 +00:00
|
|
|
builder->Apply(edit);
|
2012-10-19 21:00:53 +00:00
|
|
|
}
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
Status VersionSet::Recover(
|
2014-04-09 16:56:17 +00:00
|
|
|
const std::vector<ColumnFamilyDescriptor>& column_families,
|
|
|
|
bool read_only) {
|
2014-01-22 19:44:53 +00:00
|
|
|
std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_options;
|
|
|
|
for (auto cf : column_families) {
|
|
|
|
cf_name_to_options.insert({cf.name, cf.options});
|
|
|
|
}
|
|
|
|
// keeps track of column families in manifest that were not found in
|
|
|
|
// column families parameters. if those column families are not dropped
|
|
|
|
// by subsequent manifest records, Recover() will return failure status
|
2014-04-09 17:38:05 +00:00
|
|
|
std::unordered_map<int, std::string> column_families_not_found;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Read "CURRENT" file, which contains a pointer to the current manifest file
|
2013-12-31 02:33:57 +00:00
|
|
|
std::string manifest_filename;
|
|
|
|
Status s = ReadFileToString(
|
|
|
|
env_, CurrentFileName(dbname_), &manifest_filename
|
|
|
|
);
|
2011-03-18 22:37:00 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2013-12-31 02:33:57 +00:00
|
|
|
if (manifest_filename.empty() ||
|
|
|
|
manifest_filename.back() != '\n') {
|
2011-03-18 22:37:00 +00:00
|
|
|
return Status::Corruption("CURRENT file does not end with newline");
|
|
|
|
}
|
2013-12-31 02:33:57 +00:00
|
|
|
// remove the trailing '\n'
|
|
|
|
manifest_filename.resize(manifest_filename.size() - 1);
|
2014-03-12 17:52:32 +00:00
|
|
|
FileType type;
|
|
|
|
bool parse_ok =
|
|
|
|
ParseFileName(manifest_filename, &manifest_file_number_, &type);
|
|
|
|
if (!parse_ok || type != kDescriptorFile) {
|
|
|
|
return Status::Corruption("CURRENT file corrupted");
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-04-25 13:51:16 +00:00
|
|
|
Log(options_->info_log, "Recovering from manifest file: %s\n",
|
2013-12-31 02:33:57 +00:00
|
|
|
manifest_filename.c_str());
|
2012-08-23 02:15:06 +00:00
|
|
|
|
2013-12-31 02:33:57 +00:00
|
|
|
manifest_filename = dbname_ + "/" + manifest_filename;
|
|
|
|
unique_ptr<SequentialFile> manifest_file;
|
2014-03-12 17:52:32 +00:00
|
|
|
s = env_->NewSequentialFile(manifest_filename, &manifest_file,
|
|
|
|
storage_options_);
|
2011-03-18 22:37:00 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2012-09-24 21:01:01 +00:00
|
|
|
uint64_t manifest_file_size;
|
2013-12-31 02:33:57 +00:00
|
|
|
s = env_->GetFileSize(manifest_filename, &manifest_file_size);
|
2012-09-24 21:01:01 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
bool have_log_number = false;
|
2011-04-12 19:38:58 +00:00
|
|
|
bool have_prev_log_number = false;
|
2011-03-18 22:37:00 +00:00
|
|
|
bool have_next_file = false;
|
|
|
|
bool have_last_sequence = false;
|
|
|
|
uint64_t next_file = 0;
|
2011-04-12 19:38:58 +00:00
|
|
|
uint64_t last_sequence = 0;
|
|
|
|
uint64_t log_number = 0;
|
|
|
|
uint64_t prev_log_number = 0;
|
2014-03-05 20:13:44 +00:00
|
|
|
uint32_t max_column_family = 0;
|
2014-01-10 23:12:34 +00:00
|
|
|
std::unordered_map<uint32_t, Builder*> builders;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-01-06 21:31:06 +00:00
|
|
|
// add default column family
|
2014-04-09 16:56:17 +00:00
|
|
|
auto default_cf_iter = cf_name_to_options.find(kDefaultColumnFamilyName);
|
2014-01-22 19:44:53 +00:00
|
|
|
if (default_cf_iter == cf_name_to_options.end()) {
|
2014-03-13 01:09:03 +00:00
|
|
|
return Status::InvalidArgument("Default column family not specified");
|
2014-01-22 19:44:53 +00:00
|
|
|
}
|
2014-03-13 01:09:03 +00:00
|
|
|
VersionEdit default_cf_edit;
|
2014-04-09 16:56:17 +00:00
|
|
|
default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
|
2014-03-13 01:09:03 +00:00
|
|
|
default_cf_edit.SetColumnFamily(0);
|
|
|
|
ColumnFamilyData* default_cfd =
|
|
|
|
CreateColumnFamily(default_cf_iter->second, &default_cf_edit);
|
|
|
|
builders.insert({0, new Builder(default_cfd)});
|
2014-01-06 21:31:06 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
{
|
2014-01-22 19:44:53 +00:00
|
|
|
VersionSet::LogReporter reporter;
|
2011-03-18 22:37:00 +00:00
|
|
|
reporter.status = &s;
|
2013-12-31 02:33:57 +00:00
|
|
|
log::Reader reader(std::move(manifest_file), &reporter, true /*checksum*/,
|
|
|
|
0 /*initial_offset*/);
|
2011-03-18 22:37:00 +00:00
|
|
|
Slice record;
|
|
|
|
std::string scratch;
|
|
|
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
2014-01-14 23:27:09 +00:00
|
|
|
VersionEdit edit;
|
2011-03-18 22:37:00 +00:00
|
|
|
s = edit.DecodeFrom(record);
|
2014-01-10 23:12:34 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-02-03 20:08:33 +00:00
|
|
|
// Not found means that user didn't supply that column
|
|
|
|
// family option AND we encountered column family add
|
|
|
|
// record. Once we encounter column family drop record,
|
|
|
|
// we will delete the column family from
|
|
|
|
// column_families_not_found.
|
2014-01-22 19:44:53 +00:00
|
|
|
bool cf_in_not_found =
|
|
|
|
column_families_not_found.find(edit.column_family_) !=
|
|
|
|
column_families_not_found.end();
|
2014-02-03 20:08:33 +00:00
|
|
|
// in builders means that user supplied that column family
|
|
|
|
// option AND that we encountered column family add record
|
2014-01-22 19:44:53 +00:00
|
|
|
bool cf_in_builders =
|
|
|
|
builders.find(edit.column_family_) != builders.end();
|
|
|
|
|
|
|
|
// they can't both be true
|
|
|
|
assert(!(cf_in_not_found && cf_in_builders));
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
|
2014-01-02 17:08:12 +00:00
|
|
|
if (edit.is_column_family_add_) {
|
2014-01-22 19:44:53 +00:00
|
|
|
if (cf_in_builders || cf_in_not_found) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest adding the same column family twice");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
auto cf_options = cf_name_to_options.find(edit.column_family_name_);
|
|
|
|
if (cf_options == cf_name_to_options.end()) {
|
2014-04-09 17:38:05 +00:00
|
|
|
column_families_not_found.insert(
|
|
|
|
{edit.column_family_, edit.column_family_name_});
|
2014-01-22 19:44:53 +00:00
|
|
|
} else {
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = CreateColumnFamily(cf_options->second, &edit);
|
|
|
|
builders.insert({edit.column_family_, new Builder(cfd)});
|
2014-01-22 19:44:53 +00:00
|
|
|
}
|
2014-01-10 23:12:34 +00:00
|
|
|
} else if (edit.is_column_family_drop_) {
|
2014-01-22 19:44:53 +00:00
|
|
|
if (cf_in_builders) {
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
delete builder->second;
|
|
|
|
builders.erase(builder);
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
2014-02-11 01:04:44 +00:00
|
|
|
if (cfd->Unref()) {
|
|
|
|
delete cfd;
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = nullptr;
|
2014-02-11 01:04:44 +00:00
|
|
|
} else {
|
|
|
|
// who else can have reference to cfd!?
|
|
|
|
assert(false);
|
|
|
|
}
|
2014-01-22 19:44:53 +00:00
|
|
|
} else if (cf_in_not_found) {
|
|
|
|
column_families_not_found.erase(edit.column_family_);
|
|
|
|
} else {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest - dropping non-existing column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else if (!cf_in_not_found) {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest record referencing unknown column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
2014-01-22 19:44:53 +00:00
|
|
|
// this should never happen since cf_in_builders is true
|
|
|
|
assert(cfd != nullptr);
|
2014-01-29 21:28:50 +00:00
|
|
|
if (edit.max_level_ >= cfd->current()->NumberLevels()) {
|
2014-01-22 01:01:52 +00:00
|
|
|
s = Status::InvalidArgument(
|
|
|
|
"db has more levels than options.num_levels");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
// if it is not column family add or column family drop,
|
|
|
|
// then it's a file add/delete, which should be forwarded
|
|
|
|
// to builder
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
builder->second->Apply(&edit);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cfd != nullptr) {
|
2014-01-28 19:05:04 +00:00
|
|
|
if (edit.has_log_number_) {
|
2014-03-14 20:11:41 +00:00
|
|
|
if (cfd->GetLogNumber() > edit.log_number_) {
|
2014-03-31 19:44:54 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"MANIFEST corruption detected, but ignored - Log numbers in "
|
|
|
|
"records NOT monotonically increasing");
|
2014-03-18 20:24:27 +00:00
|
|
|
} else {
|
|
|
|
cfd->SetLogNumber(edit.log_number_);
|
|
|
|
have_log_number = true;
|
2014-03-14 20:11:41 +00:00
|
|
|
}
|
2014-01-28 19:05:04 +00:00
|
|
|
}
|
2014-02-03 20:08:33 +00:00
|
|
|
if (edit.has_comparator_ &&
|
|
|
|
edit.comparator_ != cfd->user_comparator()->Name()) {
|
|
|
|
s = Status::InvalidArgument(
|
|
|
|
cfd->user_comparator()->Name(),
|
|
|
|
"does not match existing comparator " + edit.comparator_);
|
|
|
|
break;
|
|
|
|
}
|
2014-01-02 17:08:12 +00:00
|
|
|
}
|
|
|
|
|
2011-04-12 19:38:58 +00:00
|
|
|
if (edit.has_prev_log_number_) {
|
|
|
|
prev_log_number = edit.prev_log_number_;
|
|
|
|
have_prev_log_number = true;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
if (edit.has_next_file_number_) {
|
|
|
|
next_file = edit.next_file_number_;
|
|
|
|
have_next_file = true;
|
|
|
|
}
|
|
|
|
|
2014-03-05 20:13:44 +00:00
|
|
|
if (edit.has_max_column_family_) {
|
|
|
|
max_column_family = edit.max_column_family_;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
if (edit.has_last_sequence_) {
|
2011-04-12 19:38:58 +00:00
|
|
|
last_sequence = edit.last_sequence_;
|
2011-03-18 22:37:00 +00:00
|
|
|
have_last_sequence = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
if (!have_next_file) {
|
|
|
|
s = Status::Corruption("no meta-nextfile entry in descriptor");
|
|
|
|
} else if (!have_log_number) {
|
|
|
|
s = Status::Corruption("no meta-lognumber entry in descriptor");
|
|
|
|
} else if (!have_last_sequence) {
|
|
|
|
s = Status::Corruption("no last-sequence-number entry in descriptor");
|
|
|
|
}
|
2011-04-12 19:38:58 +00:00
|
|
|
|
|
|
|
if (!have_prev_log_number) {
|
|
|
|
prev_log_number = 0;
|
|
|
|
}
|
2011-09-01 19:08:02 +00:00
|
|
|
|
2014-03-05 20:13:44 +00:00
|
|
|
column_family_set_->UpdateMaxColumnFamily(max_column_family);
|
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
MarkFileNumberUsed(prev_log_number);
|
|
|
|
MarkFileNumberUsed(log_number);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
// there were some column families in the MANIFEST that weren't specified
|
2014-04-09 16:56:17 +00:00
|
|
|
// in the argument. This is OK in read_only mode
|
|
|
|
if (read_only == false && column_families_not_found.size() > 0) {
|
|
|
|
std::string list_of_not_found;
|
2014-04-09 17:38:05 +00:00
|
|
|
for (const auto& cf : column_families_not_found) {
|
|
|
|
list_of_not_found += ", " + cf.second;
|
2014-04-09 16:56:17 +00:00
|
|
|
}
|
|
|
|
list_of_not_found = list_of_not_found.substr(2);
|
2014-01-22 19:44:53 +00:00
|
|
|
s = Status::InvalidArgument(
|
2014-04-09 17:38:05 +00:00
|
|
|
"You have to open all column families. Column families not opened: " +
|
|
|
|
list_of_not_found);
|
2014-01-22 19:44:53 +00:00
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
if (s.ok()) {
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-02-12 22:01:30 +00:00
|
|
|
auto builders_iter = builders.find(cfd->GetID());
|
|
|
|
assert(builders_iter != builders.end());
|
|
|
|
auto builder = builders_iter->second;
|
|
|
|
|
|
|
|
if (options_->max_open_files == -1) {
|
2014-02-12 18:43:27 +00:00
|
|
|
// unlimited table cache. Pre-load table handle now.
|
|
|
|
// Need to do it out of the mutex.
|
2014-02-12 22:01:30 +00:00
|
|
|
builder->LoadTableHandlers();
|
|
|
|
}
|
2014-02-12 18:43:27 +00:00
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
Version* v = new Version(cfd, this, current_version_number_++);
|
2014-02-12 22:01:30 +00:00
|
|
|
builder->SaveTo(v);
|
Prevent segfault because SizeUnderCompaction was called without any locks.
Summary:
SizeBeingCompacted was called without any lock protection. This causes
crashes, especially when running db_bench with value_size=128K.
The fix is to compute SizeUnderCompaction while holding the mutex and
passing in these values into the call to Finalize.
(gdb) where
#4 leveldb::VersionSet::SizeBeingCompacted (this=this@entry=0x7f0b490931c0, level=level@entry=4) at db/version_set.cc:1827
#5 0x000000000043a3c8 in leveldb::VersionSet::Finalize (this=this@entry=0x7f0b490931c0, v=v@entry=0x7f0b3b86b480) at db/version_set.cc:1420
#6 0x00000000004418d1 in leveldb::VersionSet::LogAndApply (this=0x7f0b490931c0, edit=0x7f0b3dc8c200, mu=0x7f0b490835b0, new_descriptor_log=<optimized out>) at db/version_set.cc:1016
#7 0x00000000004222b2 in leveldb::DBImpl::InstallCompactionResults (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1473
#8 0x0000000000426027 in leveldb::DBImpl::DoCompactionWork (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1757
#9 0x0000000000426690 in leveldb::DBImpl::BackgroundCompaction (this=this@entry=0x7f0b49083400, madeProgress=madeProgress@entry=0x7f0b41bf2d1e, deletion_state=...) at db/db_impl.cc:1268
#10 0x0000000000428f42 in leveldb::DBImpl::BackgroundCall (this=0x7f0b49083400) at db/db_impl.cc:1170
#11 0x000000000045348e in BGThread (this=0x7f0b49023100) at util/env_posix.cc:941
#12 leveldb::(anonymous namespace)::PosixEnv::BGThreadWrapper (arg=0x7f0b49023100) at util/env_posix.cc:874
#13 0x00007f0b4a7cf10d in start_thread (arg=0x7f0b41bf3700) at pthread_create.c:301
#14 0x00007f0b49b4b11d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
Test Plan:
make check
I am running db_bench with a value size of 128K to see if the segfault is fixed.
Reviewers: MarkCallaghan, sheki, emayanke
Reviewed By: sheki
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9279
2013-03-11 16:47:48 +00:00
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
// Install recovered version
|
2014-01-22 01:01:52 +00:00
|
|
|
std::vector<uint64_t> size_being_compacted(v->NumberLevels() - 1);
|
2014-01-31 23:30:27 +00:00
|
|
|
cfd->compaction_picker()->SizeBeingCompacted(size_being_compacted);
|
2014-03-20 00:22:20 +00:00
|
|
|
v->ComputeCompactionScore(size_being_compacted);
|
2014-03-18 16:45:52 +00:00
|
|
|
v->UpdateFilesBySize();
|
2014-01-22 19:44:53 +00:00
|
|
|
AppendVersion(cfd, v);
|
2014-01-10 23:12:34 +00:00
|
|
|
}
|
Prevent segfault because SizeUnderCompaction was called without any locks.
Summary:
SizeBeingCompacted was called without any lock protection. This causes
crashes, especially when running db_bench with value_size=128K.
The fix is to compute SizeUnderCompaction while holding the mutex and
passing in these values into the call to Finalize.
(gdb) where
#4 leveldb::VersionSet::SizeBeingCompacted (this=this@entry=0x7f0b490931c0, level=level@entry=4) at db/version_set.cc:1827
#5 0x000000000043a3c8 in leveldb::VersionSet::Finalize (this=this@entry=0x7f0b490931c0, v=v@entry=0x7f0b3b86b480) at db/version_set.cc:1420
#6 0x00000000004418d1 in leveldb::VersionSet::LogAndApply (this=0x7f0b490931c0, edit=0x7f0b3dc8c200, mu=0x7f0b490835b0, new_descriptor_log=<optimized out>) at db/version_set.cc:1016
#7 0x00000000004222b2 in leveldb::DBImpl::InstallCompactionResults (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1473
#8 0x0000000000426027 in leveldb::DBImpl::DoCompactionWork (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1757
#9 0x0000000000426690 in leveldb::DBImpl::BackgroundCompaction (this=this@entry=0x7f0b49083400, madeProgress=madeProgress@entry=0x7f0b41bf2d1e, deletion_state=...) at db/db_impl.cc:1268
#10 0x0000000000428f42 in leveldb::DBImpl::BackgroundCall (this=0x7f0b49083400) at db/db_impl.cc:1170
#11 0x000000000045348e in BGThread (this=0x7f0b49023100) at util/env_posix.cc:941
#12 leveldb::(anonymous namespace)::PosixEnv::BGThreadWrapper (arg=0x7f0b49023100) at util/env_posix.cc:874
#13 0x00007f0b4a7cf10d in start_thread (arg=0x7f0b41bf3700) at pthread_create.c:301
#14 0x00007f0b49b4b11d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
Test Plan:
make check
I am running db_bench with a value size of 128K to see if the segfault is fixed.
Reviewers: MarkCallaghan, sheki, emayanke
Reviewed By: sheki
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9279
2013-03-11 16:47:48 +00:00
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
manifest_file_size_ = manifest_file_size;
|
2011-05-21 02:17:43 +00:00
|
|
|
next_file_number_ = next_file + 1;
|
|
|
|
last_sequence_ = last_sequence;
|
|
|
|
prev_log_number_ = prev_log_number;
|
2012-08-23 02:15:06 +00:00
|
|
|
|
2012-08-24 18:28:59 +00:00
|
|
|
Log(options_->info_log, "Recovered from manifest file:%s succeeded,"
|
2013-11-13 05:02:03 +00:00
|
|
|
"manifest_file_number is %lu, next_file_number is %lu, "
|
|
|
|
"last_sequence is %lu, log_number is %lu,"
|
2014-03-05 20:13:44 +00:00
|
|
|
"prev_log_number is %lu,"
|
|
|
|
"max_column_family is %u\n",
|
2013-12-31 02:33:57 +00:00
|
|
|
manifest_filename.c_str(),
|
2013-11-13 05:02:03 +00:00
|
|
|
(unsigned long)manifest_file_number_,
|
|
|
|
(unsigned long)next_file_number_,
|
|
|
|
(unsigned long)last_sequence_,
|
2014-01-31 01:48:42 +00:00
|
|
|
(unsigned long)log_number,
|
2014-03-05 20:13:44 +00:00
|
|
|
(unsigned long)prev_log_number_,
|
|
|
|
column_family_set_->GetMaxColumnFamily());
|
2014-01-28 19:05:04 +00:00
|
|
|
|
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-02-26 22:16:23 +00:00
|
|
|
Log(options_->info_log,
|
2014-04-25 13:51:16 +00:00
|
|
|
"Column family [%s] (ID %u), log number is %" PRIu64 "\n",
|
|
|
|
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
|
2014-01-28 19:05:04 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
for (auto builder : builders) {
|
|
|
|
delete builder.second;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
                                      const std::string& dbname, Env* env) {
  // Reads the current MANIFEST and returns the names of all column families
  // that are live at the end of it (added and never dropped). The default
  // column family is always included. On success, *column_families is
  // replaced with the list; on failure it is cleared and the error returned.
  //
  // these are just for performance reasons, not correctness,
  // so we're fine using the defaults
  EnvOptions soptions;
  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  // NOTE: original code had a mojibake "¤t" here where "&current" belongs;
  // restored to the intended address-of argument.
  Status s = ReadFileToString(env, CurrentFileName(dbname), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size()-1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  // Strip the trailing newline so `current` is exactly the manifest filename.
  current.resize(current.size() - 1);

  std::string dscname = dbname + "/" + current;
  unique_ptr<SequentialFile> file;
  s = env->NewSequentialFile(dscname, &file, soptions);
  if (!s.ok()) {
    return s;
  }

  // Map of column family ID -> name; ordered so output is sorted by ID.
  std::map<uint32_t, std::string> column_family_names;
  // default column family is always implicitly there
  column_family_names.insert({0, kDefaultColumnFamilyName});
  VersionSet::LogReporter reporter;
  reporter.status = &s;
  log::Reader reader(std::move(file), &reporter, true /*checksum*/,
                     0 /*initial_offset*/);
  Slice record;
  std::string scratch;
  // Replay every VersionEdit, tracking only column family add/drop records.
  while (reader.ReadRecord(&record, &scratch) && s.ok()) {
    VersionEdit edit;
    s = edit.DecodeFrom(record);
    if (!s.ok()) {
      break;
    }
    if (edit.is_column_family_add_) {
      if (column_family_names.find(edit.column_family_) !=
          column_family_names.end()) {
        s = Status::Corruption("Manifest adding the same column family twice");
        break;
      }
      column_family_names.insert(
          {edit.column_family_, edit.column_family_name_});
    } else if (edit.is_column_family_drop_) {
      if (column_family_names.find(edit.column_family_) ==
          column_family_names.end()) {
        s = Status::Corruption(
            "Manifest - dropping non-existing column family");
        break;
      }
      column_family_names.erase(edit.column_family_);
    }
  }

  column_families->clear();
  if (s.ok()) {
    for (const auto& iter : column_family_names) {
      column_families->push_back(iter.second);
    }
  }

  return s;
}
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2014-04-15 20:39:26 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from LDB cmd tool. LDB tool is only using it statically, i.e. it never calls it with running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break assumption that lot of our code is relying upon. LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completition. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
2014-01-24 22:57:04 +00:00
|
|
|
// Rewrites the DB's MANIFEST so that it describes `new_levels` levels instead
// of the current (larger) number. Only legal when all files in levels
// [new_levels-1, current_levels) sit on a single level; that level's files are
// moved to the new bottom level (new_levels - 1). This is a static, offline
// operation: it opens its own VersionSet, recovers the default column family,
// mutates the recovered Version in place, and persists via LogAndApply.
// Must NOT be called on a running DB instance.
Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
                                        const Options* options,
                                        const EnvOptions& storage_options,
                                        int new_levels) {
  if (new_levels <= 1) {
    return Status::InvalidArgument(
        "Number of levels needs to be bigger than 1");
  }

  ColumnFamilyOptions cf_options(*options);
  // Table cache sized like DBImpl does: leave ~10 file slots for other uses.
  std::shared_ptr<Cache> tc(NewLRUCache(
      options->max_open_files - 10, options->table_cache_numshardbits,
      options->table_cache_remove_scan_count_limit));
  VersionSet versions(dbname, options, storage_options, tc.get());
  Status status;

  // Recover with only the default column family descriptor.
  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(*options));
  dummy.push_back(dummy_descriptor);
  status = versions.Recover(dummy);
  if (!status.ok()) {
    return status;
  }

  Version* current_version =
      versions.GetColumnFamilySet()->GetDefault()->current();
  int current_levels = current_version->NumberLevels();

  // Nothing to shrink; already at or below the requested level count.
  if (current_levels <= new_levels) {
    return Status::OK();
  }

  // Make sure there are files only on one level from
  // (new_levels-1) to (current_levels-1)
  int first_nonempty_level = -1;
  int first_nonempty_level_filenum = 0;
  for (int i = new_levels - 1; i < current_levels; i++) {
    int file_num = current_version->NumLevelFiles(i);
    if (file_num != 0) {
      if (first_nonempty_level < 0) {
        first_nonempty_level = i;
        first_nonempty_level_filenum = file_num;
      } else {
        // A second populated level makes the collapse ambiguous -> refuse.
        char msg[255];
        snprintf(msg, sizeof(msg),
                 "Found at least two levels containing files: "
                 "[%d:%d],[%d:%d].\n",
                 first_nonempty_level, first_nonempty_level_filenum, i,
                 file_num);
        return Status::InvalidArgument(msg);
      }
    }
  }

  std::vector<FileMetaData*>* old_files_list = current_version->files_;
  // we need to allocate an array with the old number of levels size to
  // avoid SIGSEGV in WriteSnapshot()
  // however, all levels bigger or equal to new_levels will be empty
  std::vector<FileMetaData*>* new_files_list =
      new std::vector<FileMetaData*>[current_levels];
  // Levels below the new bottom level are carried over unchanged.
  for (int i = 0; i < new_levels - 1; i++) {
    new_files_list[i] = old_files_list[i];
  }

  // Move the single populated deep level (if any) to the new bottom level.
  // first_nonempty_level >= new_levels - 1 >= 1 whenever it was found, so
  // the > 0 check is equivalent to "a nonempty deep level exists".
  if (first_nonempty_level > 0) {
    new_files_list[new_levels - 1] = old_files_list[first_nonempty_level];
  }

  // Swap the level array in place; the FileMetaData pointers themselves were
  // copied into new_files_list, so only the old array shell is freed here.
  delete[] current_version->files_;
  current_version->files_ = new_files_list;
  current_version->num_levels_ = new_levels;

  // Persist the shrunken shape. LogAndApply expects the DB mutex held, so we
  // satisfy it with a local dummy mutex (no concurrency in this offline tool).
  VersionEdit ve;
  port::Mutex dummy_mutex;
  MutexLock l(&dummy_mutex);
  return versions.LogAndApply(versions.GetColumnFamilySet()->GetDefault(), &ve,
                              &dummy_mutex, nullptr, true);
}
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
// Reads the manifest file named by `dscname` and prints a human-readable
// summary of every column family's state (log number, comparator, file
// layout) to stdout. With `verbose`, each individual VersionEdit is printed
// as it is replayed; `hex` controls key formatting in the debug strings.
// Returns non-OK (and prints a diagnostic) if the manifest is corrupt or
// missing required metadata records.
Status VersionSet::DumpManifest(Options& options, std::string& dscname,
                                bool verbose, bool hex) {
  // Open the specified manifest file.
  unique_ptr<SequentialFile> file;
  Status s = options.env->NewSequentialFile(dscname, &file, storage_options_);
  if (!s.ok()) {
    return s;
  }

  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t prev_log_number = 0;
  int count = 0;
  // Per-column-family comparator names and state builders, keyed by CF ID.
  std::unordered_map<uint32_t, std::string> comparators;
  std::unordered_map<uint32_t, Builder*> builders;

  // add default column family
  VersionEdit default_cf_edit;
  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
  default_cf_edit.SetColumnFamily(0);
  ColumnFamilyData* default_cfd =
      CreateColumnFamily(ColumnFamilyOptions(options), &default_cf_edit);
  builders.insert({0, new Builder(default_cfd)});

  {
    VersionSet::LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                       0/*initial_offset*/);
    Slice record;
    std::string scratch;
    // Replay every VersionEdit record, maintaining CF membership and
    // forwarding file add/delete edits to the matching builder.
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (!s.ok()) {
        break;
      }

      // Write out each individual edit
      if (verbose) {
        printf("*************************Edit[%d] = %s\n",
                count, edit.DebugString(hex).c_str());
      }
      count++;

      bool cf_in_builders =
          builders.find(edit.column_family_) != builders.end();

      if (edit.has_comparator_) {
        comparators.insert({edit.column_family_, edit.comparator_});
      }

      ColumnFamilyData* cfd = nullptr;

      if (edit.is_column_family_add_) {
        if (cf_in_builders) {
          s = Status::Corruption(
              "Manifest adding the same column family twice");
          break;
        }
        cfd = CreateColumnFamily(ColumnFamilyOptions(options), &edit);
        builders.insert({edit.column_family_, new Builder(cfd)});
      } else if (edit.is_column_family_drop_) {
        if (!cf_in_builders) {
          s = Status::Corruption(
              "Manifest - dropping non-existing column family");
          break;
        }
        auto builder_iter = builders.find(edit.column_family_);
        delete builder_iter->second;
        builders.erase(builder_iter);
        comparators.erase(edit.column_family_);
        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
        assert(cfd != nullptr);
        // Drop our reference and free the CF; leave cfd null so the
        // log-number update below is skipped for a dropped family.
        cfd->Unref();
        delete cfd;
        cfd = nullptr;
      } else {
        if (!cf_in_builders) {
          s = Status::Corruption(
              "Manifest record referencing unknown column family");
          break;
        }

        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
        // this should never happen since cf_in_builders is true
        assert(cfd != nullptr);

        // if it is not column family add or column family drop,
        // then it's a file add/delete, which should be forwarded
        // to builder
        auto builder = builders.find(edit.column_family_);
        assert(builder != builders.end());
        builder->second->Apply(&edit);
      }

      if (cfd != nullptr && edit.has_log_number_) {
        cfd->SetLogNumber(edit.log_number_);
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }

      if (edit.has_max_column_family_) {
        column_family_set_->UpdateMaxColumnFamily(edit.max_column_family_);
      }
    }
  }
  file.reset();

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
      printf("no meta-nextfile entry in descriptor");
    } else if (!have_last_sequence) {
      printf("no last-sequence-number entry in descriptor");
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }

    if (!have_prev_log_number) {
      prev_log_number = 0;
    }
  }

  if (s.ok()) {
    for (auto cfd : *column_family_set_) {
      auto builders_iter = builders.find(cfd->GetID());
      assert(builders_iter != builders.end());
      auto builder = builders_iter->second;

      Version* v = new Version(cfd, this, current_version_number_++);
      builder->SaveTo(v);
      std::vector<uint64_t> size_being_compacted(v->NumberLevels() - 1);
      cfd->compaction_picker()->SizeBeingCompacted(size_being_compacted);
      v->ComputeCompactionScore(size_being_compacted);
      v->UpdateFilesBySize();

      printf("--------------- Column family \"%s\" (ID %u) --------------\n",
             cfd->GetName().c_str(), (unsigned int)cfd->GetID());
      printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
      auto comparator = comparators.find(cfd->GetID());
      if (comparator != comparators.end()) {
        printf("comparator: %s\n", comparator->second.c_str());
      } else {
        printf("comparator: <NO COMPARATOR>\n");
      }
      printf("%s \n", v->DebugString(hex).c_str());
      delete v;
    }

    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    prev_log_number_ = prev_log_number;

    printf(
        "next_file_number %lu last_sequence "
        "%lu prev_log_number %lu max_column_family %u\n",
        (unsigned long)next_file_number_, (unsigned long)last_sequence,
        (unsigned long)prev_log_number,
        column_family_set_->GetMaxColumnFamily());
  }

  // Free all remaining builders unconditionally, mirroring Recover().
  // Previously builders were deleted only on the success path, so an early
  // break on a corruption error leaked every builder still in the map.
  for (auto& builder : builders) {
    delete builder.second;
  }

  return s;
}
|
2014-04-15 20:39:26 +00:00
|
|
|
#endif // ROCKSDB_LITE
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
void VersionSet::MarkFileNumberUsed(uint64_t number) {
|
|
|
|
if (next_file_number_ <= number) {
|
|
|
|
next_file_number_ = number + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Writes the complete current state of the VersionSet — every column
// family and every file it owns — as a sequence of VersionEdit records
// appended to the manifest `log`. Returns the first non-OK status from
// the log writer, or OK on success.
//
// WARNING: This method doesn't hold a mutex!!
//
// This is done without DB mutex lock held, but only within single-threaded
// LogAndApply. Column family manipulations can only happen within LogAndApply
// (the same single thread), so we're safe to iterate.
Status VersionSet::WriteSnapshot(log::Writer* log) {
  // TODO: Break up into multiple records to reduce memory usage on recovery?

  // Serializes one edit and appends it to the manifest as a single record.
  auto append_edit = [log](VersionEdit* edit) {
    std::string record;
    edit->EncodeTo(&record);
    return log->AddRecord(record);
  };

  for (auto cfd : *column_family_set_) {
    {
      // Store column family info
      VersionEdit edit;
      if (cfd->GetID() != 0) {
        // default column family is always there,
        // no need to explicitly write it
        edit.AddColumnFamily(cfd->GetName());
        edit.SetColumnFamily(cfd->GetID());
      }
      edit.SetComparatorName(
          cfd->internal_comparator().user_comparator()->Name());
      Status s = append_edit(&edit);
      if (!s.ok()) {
        return s;
      }
    }

    {
      // Save files
      VersionEdit edit;
      edit.SetColumnFamily(cfd->GetID());

      for (int level = 0; level < cfd->NumberLevels(); level++) {
        for (const auto& f : cfd->current()->files_[level]) {
          edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
        }
      }
      edit.SetLogNumber(cfd->GetLogNumber());
      Status s = append_edit(&edit);
      if (!s.ok()) {
        return s;
      }
    }
  }

  return Status::OK();
}
|
|
|
|
|
2013-01-08 20:00:13 +00:00
|
|
|
// Opens the mainfest file and reads all records
|
|
|
|
// till it finds the record we are looking for.
|
2014-03-18 04:50:15 +00:00
|
|
|
bool VersionSet::ManifestContains(uint64_t manifest_file_number,
|
|
|
|
const std::string& record) const {
|
|
|
|
std::string fname =
|
|
|
|
DescriptorFileName(dbname_, manifest_file_number);
|
2013-01-08 20:00:13 +00:00
|
|
|
Log(options_->info_log, "ManifestContains: checking %s\n", fname.c_str());
|
2013-01-20 10:07:13 +00:00
|
|
|
unique_ptr<SequentialFile> file;
|
2013-03-15 00:00:04 +00:00
|
|
|
Status s = env_->NewSequentialFile(fname, &file, storage_options_);
|
2013-01-08 20:00:13 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str());
|
2013-03-06 21:28:54 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"ManifestContains: is unable to reopen the manifest file %s",
|
|
|
|
fname.c_str());
|
2013-01-08 20:00:13 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-03-01 02:04:58 +00:00
|
|
|
log::Reader reader(std::move(file), nullptr, true/*checksum*/, 0);
|
2013-01-08 20:00:13 +00:00
|
|
|
Slice r;
|
|
|
|
std::string scratch;
|
|
|
|
bool result = false;
|
|
|
|
while (reader.ReadRecord(&r, &scratch)) {
|
|
|
|
if (r == Slice(record)) {
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Log(options_->info_log, "ManifestContains: result = %d\n", result ? 1 : 0);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Returns an approximation of the byte offset of `ikey` within version `v`:
// the sum of the sizes of all files that sort entirely before `ikey`, plus
// the approximate offset of `ikey` inside any file whose range contains it.
uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  const auto& icmp = v->cfd_->internal_comparator();
  for (int level = 0; level < v->NumberLevels(); level++) {
    for (const auto& file : v->files_[level]) {
      if (icmp.Compare(file->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size
        result += file->file_size;
      } else if (icmp.Compare(file->smallest, ikey) > 0) {
        // Entire file is after "ikey", so ignore
        if (level > 0) {
          // Files other than level 0 are sorted by meta->smallest, so
          // no further files in this level will contain data for
          // "ikey".
          break;
        }
      } else {
        // "ikey" falls in the range for this table.  Add the
        // approximate offset of "ikey" within the table.
        TableReader* table_reader_ptr;
        Iterator* iter = v->cfd_->table_cache()->NewIterator(
            ReadOptions(), storage_options_, icmp, *file, &table_reader_ptr);
        if (table_reader_ptr != nullptr) {
          result += table_reader_ptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}
|
|
|
|
|
[RocksDB] [Performance] Speed up FindObsoleteFiles
Summary:
FindObsoleteFiles was slow, holding the single big lock, resulted in bad p99 behavior.
Didn't profile anything, but several things could be improved:
1. VersionSet::AddLiveFiles works with std::set, which is by itself slow (a tree).
You also don't know how many dynamic allocations occur just for building up this tree.
switched to std::vector, also added logic to pre-calculate total size and do just one allocation
2. Don't see why env_->GetChildren() needs to be mutex protected, moved to PurgeObsoleteFiles where
mutex could be unlocked.
3. switched std::set to std::unordered_set, the conversion from vector is also inside PurgeObsoleteFiles
I have a feeling this should pretty much fix it.
Test Plan: make check; db_stress
Reviewers: dhruba, heyongqiang, MarkCallaghan
Reviewed By: dhruba
CC: leveldb, zshao
Differential Revision: https://reviews.facebook.net/D10197
2013-04-11 23:49:53 +00:00
|
|
|
void VersionSet::AddLiveFiles(std::vector<uint64_t>* live_list) {
|
|
|
|
// pre-calculate space requirement
|
|
|
|
int64_t total_files = 0;
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
2014-01-22 19:44:53 +00:00
|
|
|
v = v->next_) {
|
2014-01-22 01:01:52 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-01-10 23:12:34 +00:00
|
|
|
total_files += v->files_[level].size();
|
|
|
|
}
|
[RocksDB] [Performance] Speed up FindObsoleteFiles
Summary:
FindObsoleteFiles was slow, holding the single big lock, resulted in bad p99 behavior.
Didn't profile anything, but several things could be improved:
1. VersionSet::AddLiveFiles works with std::set, which is by itself slow (a tree).
You also don't know how many dynamic allocations occur just for building up this tree.
switched to std::vector, also added logic to pre-calculate total size and do just one allocation
2. Don't see why env_->GetChildren() needs to be mutex proteced, moved to PurgeObsoleteFiles where
mutex could be unlocked.
3. switched std::set to std:unordered_set, the conversion from vector is also inside PurgeObsoleteFiles
I have a feeling this should pretty much fix it.
Test Plan: make check; db_stress
Reviewers: dhruba, heyongqiang, MarkCallaghan
Reviewed By: dhruba
CC: leveldb, zshao
Differential Revision: https://reviews.facebook.net/D10197
2013-04-11 23:49:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// just one time extension to the right size
|
|
|
|
live_list->reserve(live_list->size() + total_files);
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
2014-01-22 19:44:53 +00:00
|
|
|
v = v->next_) {
|
2014-01-22 01:01:52 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-01-10 23:12:34 +00:00
|
|
|
for (const auto& f : v->files_[level]) {
|
|
|
|
live_list->push_back(f->number);
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Builds the iterator that a compaction job reads its input from: a merging
// iterator over every input file of compaction `c`. The caller owns the
// returned iterator.
Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  auto cfd = c->column_family_data();
  ReadOptions read_options;
  read_options.verify_checksums =
      cfd->options()->verify_checksums_in_compaction;
  read_options.fill_cache = false;

  // Level-0 files have to be merged together. For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
  const int space = (c->level() == 0 ? c->inputs(0)->size() + 1 : 2);
  Iterator** children = new Iterator*[space];
  int n = 0;
  for (int which = 0; which < 2; which++) {
    if (c->inputs(which)->empty()) {
      continue;
    }
    if (c->level() + which == 0) {
      // Level-0 files may overlap each other, so each gets its own iterator.
      for (const auto& file : *c->inputs(which)) {
        children[n++] = cfd->table_cache()->NewIterator(
            read_options, storage_options_compactions_,
            cfd->internal_comparator(), *file, nullptr,
            true /* for compaction */);
      }
    } else {
      // Create concatenating iterator for the files from this level
      children[n++] = NewTwoLevelIterator(
          new Version::LevelFileIteratorState(
              cfd->table_cache(), read_options, storage_options_,
              cfd->internal_comparator(), true /* for_compaction */,
              false /* prefix enabled */),
          new Version::LevelFileNumIterator(cfd->internal_comparator(),
                                            c->inputs(which)));
    }
  }
  assert(n <= space);
  Iterator* result = NewMergingIterator(
      &c->column_family_data()->internal_comparator(), children, n);
  delete[] children;
  return result;
}
|
|
|
|
|
2012-11-29 00:42:36 +00:00
|
|
|
// verify that the files listed in this compaction are present
|
2012-10-19 21:00:53 +00:00
|
|
|
// in the current version
|
|
|
|
bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
|
2013-03-06 21:28:54 +00:00
|
|
|
#ifndef NDEBUG
|
2014-02-01 00:45:20 +00:00
|
|
|
Version* version = c->column_family_data()->current();
|
2014-01-22 18:59:07 +00:00
|
|
|
if (c->input_version() != version) {
|
2014-04-25 13:51:16 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"[%s] VerifyCompactionFileConsistency version mismatch",
|
|
|
|
c->column_family_data()->GetName().c_str());
|
2012-10-19 21:00:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// verify files in level
|
|
|
|
int level = c->level();
|
|
|
|
for (int i = 0; i < c->num_input_files(0); i++) {
|
|
|
|
uint64_t number = c->input(0,i)->number;
|
|
|
|
|
|
|
|
// look for this file in the current version
|
|
|
|
bool found = false;
|
2014-01-10 23:12:34 +00:00
|
|
|
for (unsigned int j = 0; j < version->files_[level].size(); j++) {
|
|
|
|
FileMetaData* f = version->files_[level][j];
|
2012-10-19 21:00:53 +00:00
|
|
|
if (f->number == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return false; // input files non existant in current version
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// verify level+1 files
|
|
|
|
level++;
|
|
|
|
for (int i = 0; i < c->num_input_files(1); i++) {
|
|
|
|
uint64_t number = c->input(1,i)->number;
|
|
|
|
|
|
|
|
// look for this file in the current version
|
|
|
|
bool found = false;
|
2014-01-10 23:12:34 +00:00
|
|
|
for (unsigned int j = 0; j < version->files_[level].size(); j++) {
|
|
|
|
FileMetaData* f = version->files_[level][j];
|
2012-10-19 21:00:53 +00:00
|
|
|
if (f->number == number) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return false; // input files non existant in current version
|
|
|
|
}
|
|
|
|
}
|
2013-03-06 21:28:54 +00:00
|
|
|
#endif
|
2012-10-19 21:00:53 +00:00
|
|
|
return true; // everything good
|
|
|
|
}
|
|
|
|
|
2014-01-16 00:15:43 +00:00
|
|
|
// Searches the current version of every column family for the file with the
// given number. On success fills in *filelevel, *meta and *cfd and returns
// OK; returns NotFound if no level of any column family contains the file.
Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
                                      FileMetaData** meta,
                                      ColumnFamilyData** cfd) {
  for (auto cf : *column_family_set_) {
    Version* current = cf->current();
    for (int level = 0; level < current->NumberLevels(); level++) {
      for (const auto& file : current->files_[level]) {
        if (file->number != number) {
          continue;
        }
        *meta = file;
        *filelevel = level;
        *cfd = cf;
        return Status::OK();
      }
    }
  }
  return Status::NotFound("File not present in any level");
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-02-03 20:08:33 +00:00
|
|
|
for (int level = 0; level < cfd->NumberLevels(); level++) {
|
2014-01-29 21:28:50 +00:00
|
|
|
for (const auto& file : cfd->current()->files_[level]) {
|
2014-01-10 23:12:34 +00:00
|
|
|
LiveFileMetaData filemetadata;
|
2014-04-30 20:24:52 +00:00
|
|
|
filemetadata.column_family_name = cfd->GetName();
|
2014-01-22 19:44:53 +00:00
|
|
|
filemetadata.name = TableFileName("", file->number);
|
2014-01-10 23:12:34 +00:00
|
|
|
filemetadata.level = level;
|
2014-01-22 19:44:53 +00:00
|
|
|
filemetadata.size = file->file_size;
|
|
|
|
filemetadata.smallestkey = file->smallest.user_key().ToString();
|
|
|
|
filemetadata.largestkey = file->largest.user_key().ToString();
|
|
|
|
filemetadata.smallest_seqno = file->smallest_seqno;
|
|
|
|
filemetadata.largest_seqno = file->largest_seqno;
|
2014-01-10 23:12:34 +00:00
|
|
|
metadata->push_back(filemetadata);
|
|
|
|
}
|
2013-08-22 21:32:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-12 19:53:26 +00:00
|
|
|
// Transfers the accumulated obsolete files to the caller: appends them to
// *files and empties the internal list so each file is reported only once.
void VersionSet::GetObsoleteFiles(std::vector<FileMetaData*>* files) {
  files->reserve(files->size() + obsolete_files_.size());
  for (auto* f : obsolete_files_) {
    files->push_back(f);
  }
  obsolete_files_.clear();
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
// Creates the in-memory state for a new column family described by a
// column-family-add `edit`: registers it with the column family set, installs
// an initial Version, and gives it a fresh memtable. Returns the new
// ColumnFamilyData (owned by column_family_set_).
ColumnFamilyData* VersionSet::CreateColumnFamily(
    const ColumnFamilyOptions& options, VersionEdit* edit) {
  assert(edit->is_column_family_add_);

  // Sentinel head of the new family's circular version list.
  Version* dummy_versions = new Version(nullptr, this);
  auto new_cfd = column_family_set_->CreateColumnFamily(
      edit->column_family_name_, edit->column_family_, dummy_versions, options);

  // Install an (empty) initial version as the family's current version.
  Version* initial = new Version(new_cfd, this, current_version_number_++);
  AppendVersion(new_cfd, initial);

  new_cfd->CreateNewMemtable();
  new_cfd->SetLogNumber(edit->log_number_);
  return new_cfd;
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|