// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <algorithm>
#include <map>
#include <set>
#include <climits>
#include <unordered_map>
#include <vector>
#include <stdio.h>

#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/merge_context.h"
#include "db/table_cache.h"
#include "db/compaction.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "table/table_reader.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "table/format.h"
#include "table/plain_table_factory.h"
#include "table/meta_blocks.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/stop_watch.h"

namespace rocksdb {
namespace {

// Find File in FileLevel data structure
// Within an index range defined by left and right
int FindFileInRange(const InternalKeyComparator& icmp,
    const FileLevel& file_level,
    const Slice& key,
    uint32_t left,
    uint32_t right) {
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FdWithKeyRange& f = file_level.files[mid];
    if (icmp.InternalKeyComparator::Compare(f.largest_key, key) < 0) {
      // Key at "mid.largest" is < "target". Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target". Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}
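
// Worked example (illustrative, using user-key-like strings for brevity):
// for a level whose files have largest keys ["d", "j", "p"], searching
// [left=0, right=3) for key "k" returns index 2, the first file whose
// largest key is >= "k". A key beyond "p" returns 3 (== right), meaning no
// file in the range can contain it.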

bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b) {
  if (a->smallest_seqno != b->smallest_seqno) {
    return a->smallest_seqno > b->smallest_seqno;
  }
  if (a->largest_seqno != b->largest_seqno) {
    return a->largest_seqno > b->largest_seqno;
  }
  // Break ties by file number
  return a->fd.GetNumber() > b->fd.GetNumber();
}

bool BySmallestKey(FileMetaData* a, FileMetaData* b,
                   const InternalKeyComparator* cmp) {
  int r = cmp->Compare(a->smallest, b->smallest);
  if (r != 0) {
    return (r < 0);
  }
  // Break ties by file number
  return (a->fd.GetNumber() < b->fd.GetNumber());
}
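
// Note: both orderings fall back to the file number, which is unique within
// a DB, so each comparator defines a strict total order; that matters when
// they are used as ordering predicates (sorting requires at least a strict
// weak ordering).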

// Class to help choose the next file to search for the particular key.
// Searches and returns files level by level.
// We can search level-by-level since entries never hop across
// levels. Therefore we are guaranteed that if we find data
// in a smaller level, later levels are irrelevant (unless we
// are MergeInProgress).
class FilePicker {
 public:
  FilePicker(
      std::vector<FileMetaData*>* files,
      const Slice& user_key,
      const Slice& ikey,
      autovector<FileLevel>* file_levels,
      unsigned int num_levels,
      FileIndexer* file_indexer,
      const Comparator* user_comparator,
      const InternalKeyComparator* internal_comparator)
      : num_levels_(num_levels),
        curr_level_(-1),
        search_left_bound_(0),
        search_right_bound_(FileIndexer::kLevelMaxIndex),
#ifndef NDEBUG
        files_(files),
#endif
        file_levels_(file_levels),
        user_key_(user_key),
        ikey_(ikey),
        file_indexer_(file_indexer),
        user_comparator_(user_comparator),
        internal_comparator_(internal_comparator) {
    // Setup member variables to search first level.
    search_ended_ = !PrepareNextLevel();
    if (!search_ended_) {
      // Prefetch Level 0 table data to avoid cache miss if possible.
      for (unsigned int i = 0; i < (*file_levels_)[0].num_files; ++i) {
        auto* r = (*file_levels_)[0].files[i].fd.table_reader;
        if (r) {
          r->Prepare(ikey);
        }
      }
    }
  }

  FdWithKeyRange* GetNextFile() {
    while (!search_ended_) {  // Loops over different levels.
      while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
        // Loops over all files in current level.
        FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_];
        int cmp_largest = -1;

        // Do key range filtering of files and/or fractional cascading if:
        // (1) not all the files are in level 0, or
        // (2) there are more than 3 Level 0 files
        // If there are only 3 or fewer level 0 files in the system, we skip
        // the key range filtering. In that case the system is more likely
        // highly tuned to minimize the number of tables queried by each
        // query, so it is unlikely that key range filtering is more
        // efficient than querying the files.
        if (num_levels_ > 1 || curr_file_level_->num_files > 3) {
          // Check if key is within a file's range. If search left bound and
          // right bound point to the same file, we are sure key falls in
          // range.
          assert(
              curr_level_ == 0 ||
              curr_index_in_curr_level_ == start_index_in_curr_level_ ||
              user_comparator_->Compare(user_key_,
                ExtractUserKey(f->smallest_key)) <= 0);

          int cmp_smallest = user_comparator_->Compare(user_key_,
              ExtractUserKey(f->smallest_key));
          if (cmp_smallest >= 0) {
            cmp_largest = user_comparator_->Compare(user_key_,
                ExtractUserKey(f->largest_key));
          }

          // Setup file search bound for the next level based on the
          // comparison results
          if (curr_level_ > 0) {
            file_indexer_->GetNextLevelIndex(curr_level_,
                                             curr_index_in_curr_level_,
                                             cmp_smallest, cmp_largest,
                                             &search_left_bound_,
                                             &search_right_bound_);
          }
          // Key falls out of current file's range
          if (cmp_smallest < 0 || cmp_largest > 0) {
            if (curr_level_ == 0) {
              ++curr_index_in_curr_level_;
              continue;
            } else {
              // Search next level.
              break;
            }
          }
        }
#ifndef NDEBUG
        // Sanity check to make sure that the files are correctly sorted
        if (prev_file_) {
          if (curr_level_ != 0) {
            int comp_sign = internal_comparator_->Compare(
                prev_file_->largest_key, f->smallest_key);
            assert(comp_sign < 0);
          } else {
            // level == 0, the current file cannot be newer than the previous
            // one. The compressed data structure carries no seqno attribute,
            // so check against the original FileMetaData instead.
            assert(curr_index_in_curr_level_ > 0);
            assert(!NewestFirstBySeqNo(files_[0][curr_index_in_curr_level_],
                  files_[0][curr_index_in_curr_level_-1]));
          }
        }
        prev_file_ = f;
#endif
        if (curr_level_ > 0 && cmp_largest < 0) {
          // No more files to search in this level.
          search_ended_ = !PrepareNextLevel();
        } else {
          ++curr_index_in_curr_level_;
        }
        return f;
      }
      // Start searching next level.
      search_ended_ = !PrepareNextLevel();
    }
    // Search ended.
    return nullptr;
  }

 private:
  unsigned int num_levels_;
  unsigned int curr_level_;
  int search_left_bound_;
  int search_right_bound_;
#ifndef NDEBUG
  std::vector<FileMetaData*>* files_;
#endif
  autovector<FileLevel>* file_levels_;
  bool search_ended_;
  FileLevel* curr_file_level_;
  unsigned int curr_index_in_curr_level_;
  unsigned int start_index_in_curr_level_;
  Slice user_key_;
  Slice ikey_;
  FileIndexer* file_indexer_;
  const Comparator* user_comparator_;
  const InternalKeyComparator* internal_comparator_;
#ifndef NDEBUG
  FdWithKeyRange* prev_file_;
#endif

  // Setup local variables to search next level.
  // Returns false if there are no more levels to search.
  bool PrepareNextLevel() {
    curr_level_++;
    while (curr_level_ < num_levels_) {
      curr_file_level_ = &(*file_levels_)[curr_level_];
      if (curr_file_level_->num_files == 0) {
        // When current level is empty, the search bound generated from upper
        // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
        // also empty.
        assert(search_left_bound_ == 0);
        assert(search_right_bound_ == -1 ||
               search_right_bound_ == FileIndexer::kLevelMaxIndex);
        // Since current level is empty, it will need to search all files in
        // the next level
        search_left_bound_ = 0;
        search_right_bound_ = FileIndexer::kLevelMaxIndex;
        curr_level_++;
        continue;
      }

      // Some files may overlap each other. We find
      // all files that overlap user_key and process them in order from
      // newest to oldest. In the context of merge-operator, this can occur at
      // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes
      // are always compacted into a single entry).
      int32_t start_index;
      if (curr_level_ == 0) {
        // On Level-0, we read through all files to check for overlap.
        start_index = 0;
      } else {
        // On Level-n (n >= 1), files are sorted. Binary search to find the
        // earliest file whose largest key >= ikey. Search left bound and
        // right bound are used to narrow the range.
        if (search_left_bound_ == search_right_bound_) {
          start_index = search_left_bound_;
        } else if (search_left_bound_ < search_right_bound_) {
          if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
            search_right_bound_ = curr_file_level_->num_files - 1;
          }
          start_index = FindFileInRange(*internal_comparator_,
              *curr_file_level_, ikey_,
              search_left_bound_, search_right_bound_);
        } else {
          // search_left_bound > search_right_bound, key does not exist in
          // this level. Since no comparison is done in this level, it will
          // need to search all files in the next level.
          search_left_bound_ = 0;
          search_right_bound_ = FileIndexer::kLevelMaxIndex;
          curr_level_++;
          continue;
        }
      }
      start_index_in_curr_level_ = start_index;
      curr_index_in_curr_level_ = start_index;
#ifndef NDEBUG
      prev_file_ = nullptr;
#endif
      return true;
    }
    // curr_level_ == num_levels_, so there are no more levels to search.
    return false;
  }
};
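
// Illustrative sketch (not the actual call site): a caller such as
// Version::Get is expected to drive FilePicker roughly like this, probing
// candidate files from newest to oldest until the key is resolved:
//
//   FilePicker fp(files, user_key, ikey, &file_levels, num_levels,
//                 &file_indexer, ucmp, icmp);
//   for (FdWithKeyRange* f = fp.GetNextFile(); f != nullptr;
//        f = fp.GetNextFile()) {
//     // probe the table referenced by f->fd; stop on a definitive result
//   }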
} // anonymous namespace

static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  uint64_t sum = 0;
  for (size_t i = 0; i < files.size() && files[i]; i++) {
    sum += files[i]->fd.GetFileSize();
  }
  return sum;
}

static uint64_t TotalCompensatedFileSize(
    const std::vector<FileMetaData*>& files) {
  uint64_t sum = 0;
  for (size_t i = 0; i < files.size() && files[i]; i++) {
    sum += files[i]->compensated_file_size;
  }
  return sum;
}
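
// Note (assumption about intent): compensated_file_size is the file size
// adjusted elsewhere to charge extra for deletion entries, so files heavy
// with tombstones score higher for compaction than raw byte counts suggest.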

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < num_levels_; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        if (f->table_reader_handle) {
          cfd_->table_cache()->ReleaseHandle(f->table_reader_handle);
          f->table_reader_handle = nullptr;
        }
        vset_->obsolete_files_.push_back(f);
      }
    }
  }
  delete[] files_;
}

int FindFile(const InternalKeyComparator& icmp,
             const FileLevel& file_level,
             const Slice& key) {
  return FindFileInRange(icmp, file_level, key, 0, file_level.num_files);
}

void DoGenerateFileLevel(FileLevel* file_level,
        const std::vector<FileMetaData*>& files,
        Arena* arena) {
  assert(file_level);
  assert(files.size() >= 0);
  assert(arena);

  size_t num = files.size();
  file_level->num_files = num;
  char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange));
  file_level->files = new (mem)FdWithKeyRange[num];

  for (size_t i = 0; i < num; i++) {
    Slice smallest_key = files[i]->smallest.Encode();
    Slice largest_key = files[i]->largest.Encode();

    // Copy key slice to sequential memory
    size_t smallest_size = smallest_key.size();
    size_t largest_size = largest_key.size();
    mem = arena->AllocateAligned(smallest_size + largest_size);
    memcpy(mem, smallest_key.data(), smallest_size);
    memcpy(mem + smallest_size, largest_key.data(), largest_size);

    FdWithKeyRange& f = file_level->files[i];
    f.fd = files[i]->fd;
    f.smallest_key = Slice(mem, smallest_size);
    f.largest_key = Slice(mem + smallest_size, largest_size);
  }
}
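
// Layout note: the FdWithKeyRange array and all of its key bytes are
// allocated from the arena, so an entire level's search metadata sits
// contiguously in memory. That locality is the point of the compressed
// file-level structure and is what speeds up Version::Get and FindFile.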

static bool AfterFile(const Comparator* ucmp,
                      const Slice* user_key, const FdWithKeyRange* f) {
  // nullptr user_key occurs before all keys and is therefore never after *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, ExtractUserKey(f->largest_key)) > 0);
}

static bool BeforeFile(const Comparator* ucmp,
                       const Slice* user_key, const FdWithKeyRange* f) {
  // nullptr user_key occurs after all keys and is therefore never before *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, ExtractUserKey(f->smallest_key)) < 0);
}

bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const FileLevel& file_level,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (size_t i = 0; i < file_level.num_files; i++) {
      const FdWithKeyRange* f = &(file_level.files[i]);
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != nullptr) {
    // Find the earliest possible internal key for smallest_user_key
    InternalKey small(*smallest_user_key, kMaxSequenceNumber,
                      kValueTypeForSeek);
    index = FindFile(icmp, file_level, small.Encode());
  }

  if (index >= file_level.num_files) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, &file_level.files[index]);
}
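
// Example (illustrative): given a disjoint, sorted level with key ranges
// [a..c], [e..g], [i..k], the query range [d..f] binary-searches to file
// [e..g] (the first file whose largest key is >= "d") and reports overlap,
// while [l..z] lands past the last file and reports none.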

// An internal iterator. For a given version/level pair, yields
// information about the files in the level. For a given entry, key()
// is the largest key that occurs in the file, and value() is the raw
// bytes of the file's FileDescriptor (file number, path id, and size),
// which the two-level iterator state decodes to open the table.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const FileLevel* flevel)
      : icmp_(icmp),
        flevel_(flevel),
        index_(flevel->num_files),
        current_value_(0, 0, 0) {  // Marks as invalid
  }
  virtual bool Valid() const {
    return index_ < flevel_->num_files;
  }
  virtual void Seek(const Slice& target) {
    index_ = FindFile(icmp_, *flevel_, target);
  }
  virtual void SeekToFirst() { index_ = 0; }
  virtual void SeekToLast() {
    index_ = (flevel_->num_files == 0) ? 0 : flevel_->num_files - 1;
  }
  virtual void Next() {
    assert(Valid());
    index_++;
  }
  virtual void Prev() {
    assert(Valid());
    if (index_ == 0) {
      index_ = flevel_->num_files;  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const {
    assert(Valid());
    return flevel_->files[index_].largest_key;
  }
  Slice value() const {
    assert(Valid());

    auto file_meta = flevel_->files[index_];
    current_value_ = file_meta.fd;
    return Slice(reinterpret_cast<const char*>(&current_value_),
                 sizeof(FileDescriptor));
  }
  virtual Status status() const { return Status::OK(); }
 private:
  const InternalKeyComparator icmp_;
  const FileLevel* flevel_;
  uint32_t index_;
  mutable FileDescriptor current_value_;
};

class Version::LevelFileIteratorState : public TwoLevelIteratorState {
 public:
  LevelFileIteratorState(TableCache* table_cache,
    const ReadOptions& read_options, const EnvOptions& env_options,
    const InternalKeyComparator& icomparator, bool for_compaction,
    bool prefix_enabled)
    : TwoLevelIteratorState(prefix_enabled),
      table_cache_(table_cache), read_options_(read_options),
      env_options_(env_options), icomparator_(icomparator),
      for_compaction_(for_compaction) {}

  Iterator* NewSecondaryIterator(const Slice& meta_handle) override {
    if (meta_handle.size() != sizeof(FileDescriptor)) {
      return NewErrorIterator(
          Status::Corruption("FileReader invoked with unexpected value"));
    } else {
      const FileDescriptor* fd =
          reinterpret_cast<const FileDescriptor*>(meta_handle.data());
      return table_cache_->NewIterator(
          read_options_, env_options_, icomparator_, *fd,
          nullptr /* don't need reference to table*/, for_compaction_);
    }
  }

  bool PrefixMayMatch(const Slice& internal_key) override {
    return true;
  }

 private:
  TableCache* table_cache_;
  const ReadOptions read_options_;
  const EnvOptions& env_options_;
  const InternalKeyComparator& icomparator_;
  bool for_compaction_;
};

Status Version::GetTableProperties(std::shared_ptr<const TableProperties>* tp,
                                   const FileMetaData* file_meta,
                                   const std::string* fname) {
  auto table_cache = cfd_->table_cache();
  auto options = cfd_->options();
  Status s = table_cache->GetTableProperties(
      vset_->storage_options_, cfd_->internal_comparator(), file_meta->fd,
      tp, true /* no io */);
  if (s.ok()) {
    return s;
  }

  // We only ignore the `Incomplete` error: by design, the no-IO lookup above
  // is not allowed to load a table that is not already in the table cache.
  if (!s.IsIncomplete()) {
    return s;
  }

  // 2. The table is not present in the table cache; read the table properties
  // directly from the properties block in the file.
  std::unique_ptr<RandomAccessFile> file;
  if (fname != nullptr) {
    s = options->env->NewRandomAccessFile(
        *fname, &file, vset_->storage_options_);
  } else {
    s = options->env->NewRandomAccessFile(
        TableFileName(vset_->options_->db_paths, file_meta->fd.GetNumber(),
                      file_meta->fd.GetPathId()),
        &file, vset_->storage_options_);
  }
  if (!s.ok()) {
    return s;
  }

  TableProperties* raw_table_properties;
  // By setting the magic number to kInvalidTableMagicNumber, we can bypass
  // the magic number check in the footer.
  s = ReadTableProperties(
      file.get(), file_meta->fd.GetFileSize(),
      Footer::kInvalidTableMagicNumber /* table's magic number */,
      vset_->env_, options->info_log.get(), &raw_table_properties);
  if (!s.ok()) {
    return s;
  }
  RecordTick(options->statistics.get(), NUMBER_DIRECT_LOAD_TABLE_PROPERTIES);

  *tp = std::shared_ptr<const TableProperties>(raw_table_properties);
  return s;
}

Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
  for (int level = 0; level < num_levels_; level++) {
    for (const auto& file_meta : files_[level]) {
      auto fname =
          TableFileName(vset_->options_->db_paths, file_meta->fd.GetNumber(),
                        file_meta->fd.GetPathId());
      // 1. If the table is already present in table cache, load table
      // properties from there.
      std::shared_ptr<const TableProperties> table_properties;
      Status s = GetTableProperties(&table_properties, file_meta, &fname);
      if (s.ok()) {
        props->insert({fname, table_properties});
      } else {
        return s;
      }
    }
  }

  return Status::OK();
}

uint64_t Version::GetEstimatedActiveKeys() {
  // Estimation will be inaccurate when:
  // (1) there are merge keys
  // (2) keys are directly overwritten
  // (3) deletions are issued for non-existing keys
  return num_non_deletions_ - num_deletions_;
}
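
// Example: 100 puts of distinct keys, 30 overwrites of those keys, and 10
// deletes give num_non_deletions_ = 130 and num_deletions_ = 10, so the
// estimate is 120 even though only 90 distinct keys remain live; cases
// (1)-(3) above are exactly the sources of that drift.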

void Version::AddIterators(const ReadOptions& read_options,
                           const EnvOptions& soptions,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < file_levels_[0].num_files; i++) {
    const auto& file = file_levels_[0].files[i];
    iters->push_back(cfd_->table_cache()->NewIterator(
        read_options, soptions, cfd_->internal_comparator(), file.fd));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < num_levels_; level++) {
    if (file_levels_[level].num_files != 0) {
      iters->push_back(NewTwoLevelIterator(new LevelFileIteratorState(
          cfd_->table_cache(), read_options, soptions,
          cfd_->internal_comparator(), false /* for_compaction */,
          cfd_->options()->prefix_extractor != nullptr),
        new LevelFileNumIterator(cfd_->internal_comparator(),
            &file_levels_[level])));
    }
  }
}

void Version::AddIterators(const ReadOptions& read_options,
                           const EnvOptions& soptions,
                           MergeIteratorBuilder* merge_iter_builder) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < file_levels_[0].num_files; i++) {
    const auto& file = file_levels_[0].files[i];
    merge_iter_builder->AddIterator(cfd_->table_cache()->NewIterator(
        read_options, soptions, cfd_->internal_comparator(), file.fd, nullptr,
        false, merge_iter_builder->GetArena()));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < num_levels_; level++) {
    if (file_levels_[level].num_files != 0) {
      merge_iter_builder->AddIterator(NewTwoLevelIterator(
          new LevelFileIteratorState(
              cfd_->table_cache(), read_options, soptions,
              cfd_->internal_comparator(), false /* for_compaction */,
              cfd_->options()->prefix_extractor != nullptr),
          new LevelFileNumIterator(cfd_->internal_comparator(),
              &file_levels_[level]), merge_iter_builder->GetArena()));
    }
  }
}
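
// Note: this overload mirrors the vector-based AddIterators above but routes
// every child iterator through MergeIteratorBuilder and its arena, so the
// iterator tree built for DB::NewIterator can live in a single arena
// allocation instead of many individual mallocs.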
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Callback from TableCache::Get()
|
|
|
|
namespace {
|
|
|
|
enum SaverState {
|
|
|
|
kNotFound,
|
|
|
|
kFound,
|
|
|
|
kDeleted,
|
|
|
|
kCorrupt,
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
|
|
|
kMerge // saver contains the current merge result (the operands)
|
2012-04-17 15:36:46 +00:00
|
|
|
};
|
|
|
|
struct Saver {
|
|
|
|
SaverState state;
|
|
|
|
const Comparator* ucmp;
|
|
|
|
Slice user_key;
|
2013-07-26 19:57:01 +00:00
|
|
|
bool* value_found; // Is value set correctly? Used by KeyMayExist
|
2012-04-17 15:36:46 +00:00
|
|
|
std::string* value;
|
2013-03-21 22:59:47 +00:00
|
|
|
const MergeOperator* merge_operator;
|
2013-12-03 02:34:05 +00:00
|
|
|
// the merge operations encountered so far
|
|
|
|
MergeContext* merge_context;
|
2013-03-21 22:59:47 +00:00
|
|
|
Logger* logger;
|
2013-11-22 22:14:05 +00:00
|
|
|
Statistics* statistics;
|
2012-04-17 15:36:46 +00:00
|
|
|
};
|
|
|
|
}
|
2013-07-06 01:49:18 +00:00
|
|
|
|
2013-10-29 00:54:09 +00:00
|
|
|
// Called from TableCache::Get and Table::Get when the file/block in which
|
|
|
|
// the key may exist is not present in the TableCache/BlockCache respectively.
|
|
|
|
// In this case we can't guarantee that the key does not exist, and we are
|
|
|
|
// not permitted to do IO to be certain. Set state=kFound and value_found=false
|
|
|
|
// to let the caller know that the key may exist but is not in memory.
|
2013-07-06 01:49:18 +00:00
|
|
|
static void MarkKeyMayExist(void* arg) {
|
|
|
|
Saver* s = reinterpret_cast<Saver*>(arg);
|
|
|
|
s->state = kFound;
|
2013-07-26 19:57:01 +00:00
|
|
|
if (s->value_found != nullptr) {
|
|
|
|
*(s->value_found) = false;
|
|
|
|
}
|
2013-07-06 01:49:18 +00:00
|
|
|
}
|
|
|
|
|
2014-01-27 21:53:22 +00:00
|
|
|
static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
|
2014-06-20 08:23:02 +00:00
|
|
|
const Slice& v) {
|
2012-04-17 15:36:46 +00:00
|
|
|
Saver* s = reinterpret_cast<Saver*>(arg);
|
2013-12-03 02:34:05 +00:00
|
|
|
MergeContext* merge_context = s->merge_context;
|
2013-08-06 03:14:32 +00:00
|
|
|
std::string merge_result; // temporary area for merge results later
|
|
|
|
|
2013-12-03 02:34:05 +00:00
|
|
|
assert(s != nullptr && merge_context != nullptr);
|
2013-08-06 03:14:32 +00:00
|
|
|
|
2014-06-20 08:23:02 +00:00
|
|
|
// TODO: Merge?
|
2014-01-27 21:53:22 +00:00
|
|
|
if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
|
|
|
|
// Key matches. Process it
|
|
|
|
switch (parsed_key.type) {
|
|
|
|
case kTypeValue:
|
|
|
|
if (kNotFound == s->state) {
|
|
|
|
s->state = kFound;
|
|
|
|
s->value->assign(v.data(), v.size());
|
|
|
|
} else if (kMerge == s->state) {
|
|
|
|
assert(s->merge_operator != nullptr);
|
|
|
|
s->state = kFound;
|
|
|
|
if (!s->merge_operator->FullMerge(s->user_key, &v,
|
|
|
|
merge_context->GetOperands(),
|
|
|
|
s->value, s->logger)) {
|
|
|
|
RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
|
|
|
|
s->state = kCorrupt;
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
2014-01-27 21:53:22 +00:00
|
|
|
} else {
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
return false;
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2014-01-27 21:53:22 +00:00
|
|
|
case kTypeDeletion:
|
|
|
|
if (kNotFound == s->state) {
|
|
|
|
s->state = kDeleted;
|
|
|
|
} else if (kMerge == s->state) {
|
|
|
|
s->state = kFound;
|
2013-12-03 02:34:05 +00:00
|
|
|
if (!s->merge_operator->FullMerge(s->user_key, nullptr,
|
|
|
|
merge_context->GetOperands(),
|
|
|
|
s->value, s->logger)) {
|
2014-01-27 21:53:22 +00:00
|
|
|
RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
|
|
|
|
s->state = kCorrupt;
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
2014-01-27 21:53:22 +00:00
|
|
|
} else {
|
2013-08-14 23:32:46 +00:00
|
|
|
assert(false);
|
2014-01-27 21:53:22 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
|
|
|
|
case kTypeMerge:
|
|
|
|
assert(s->state == kNotFound || s->state == kMerge);
|
|
|
|
s->state = kMerge;
|
|
|
|
merge_context->PushOperand(v);
|
2014-03-25 00:57:13 +00:00
|
|
|
return true;
|
2014-01-27 21:53:22 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
assert(false);
|
|
|
|
break;
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
// s->state could be kCorrupt, kMerge or kNotFound
|
|
|
|
|
|
|
|
return false;
|
2011-06-22 02:36:45 +00:00
|
|
|
}
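
A compressed sketch of the stacking loop that SaveValue() implements above,
using hypothetical stand-in types (OpType, KeyVersion) rather than the real
internal-key plumbing; plain string concatenation stands in for the user's
FullMerge():

#include <deque>
#include <string>

enum class OpType { kPut, kDelete, kMerge };
struct KeyVersion { OpType type; std::string value; };

// Walk newest-to-oldest versions of one key, stacking merge operands until
// a Put/Delete/end-of-history is hit, then fold the operands (oldest first)
// onto the base value.
std::string ResolveKey(const std::deque<KeyVersion>& newest_first) {
  std::deque<std::string> operands;  // stacked merge operands, newest first
  for (const KeyVersion& v : newest_first) {
    if (v.type == OpType::kMerge) {
      operands.push_back(v.value);   // keep stacking
      continue;
    }
    // Base value found: a Put supplies it, a Delete means an empty base.
    std::string result = (v.type == OpType::kPut) ? v.value : "";
    for (auto it = operands.rbegin(); it != operands.rend(); ++it) {
      result += *it;  // stand-in for FullMerge(key, &base, operands, ...)
    }
    return result;
  }
  // End of history: merge the operands against a nullptr base value.
  std::string result;
  for (auto it = operands.rbegin(); it != operands.rend(); ++it) {
    result += *it;
  }
  return result;
}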
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
Version::Version(ColumnFamilyData* cfd, VersionSet* vset,
|
|
|
|
uint64_t version_number)
|
|
|
|
: cfd_(cfd),
|
2014-04-17 21:07:05 +00:00
|
|
|
internal_comparator_((cfd == nullptr) ? nullptr
|
|
|
|
: &cfd->internal_comparator()),
|
2014-07-28 21:50:16 +00:00
|
|
|
user_comparator_(
|
|
|
|
(cfd == nullptr) ? nullptr : internal_comparator_->user_comparator()),
|
2014-04-17 21:07:05 +00:00
|
|
|
table_cache_((cfd == nullptr) ? nullptr : cfd->table_cache()),
|
|
|
|
merge_operator_((cfd == nullptr) ? nullptr
|
|
|
|
: cfd->options()->merge_operator.get()),
|
|
|
|
info_log_((cfd == nullptr) ? nullptr : cfd->options()->info_log.get()),
|
|
|
|
db_statistics_((cfd == nullptr) ? nullptr
|
|
|
|
: cfd->options()->statistics.get()),
|
2014-06-13 22:06:10 +00:00
|
|
|
// cfd is nullptr if Version is dummy
|
|
|
|
num_levels_(cfd == nullptr ? 0 : cfd->NumberLevels()),
|
|
|
|
num_non_empty_levels_(num_levels_),
|
2014-07-17 00:39:18 +00:00
|
|
|
file_indexer_(cfd == nullptr
|
|
|
|
? nullptr
|
|
|
|
: cfd->internal_comparator().user_comparator()),
|
2014-01-31 23:30:27 +00:00
|
|
|
vset_(vset),
|
2014-01-16 00:15:43 +00:00
|
|
|
next_(this),
|
|
|
|
prev_(this),
|
|
|
|
refs_(0),
|
|
|
|
files_(new std::vector<FileMetaData*>[num_levels_]),
|
|
|
|
files_by_size_(num_levels_),
|
|
|
|
next_file_to_compact_by_size_(num_levels_),
|
|
|
|
compaction_score_(num_levels_),
|
|
|
|
compaction_level_(num_levels_),
|
hints for narrowing down FindFile range and avoiding checking irrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing the
target key with each file's largest key, and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparison that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in levels L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending on the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files. (A small sketch of
hint (2) appears after the Version constructor below.)
Some initial results: measured with a 500M-key DB, when write is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When write is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
|
|
|
version_number_(version_number),
|
2014-06-24 22:37:06 +00:00
|
|
|
total_file_size_(0),
|
|
|
|
total_raw_key_size_(0),
|
|
|
|
total_raw_value_size_(0),
|
2014-07-28 21:50:16 +00:00
|
|
|
num_non_deletions_(0),
|
|
|
|
num_deletions_(0) {
|
2014-07-09 19:46:08 +00:00
|
|
|
if (cfd != nullptr && cfd->current() != nullptr) {
|
|
|
|
total_file_size_ = cfd->current()->total_file_size_;
|
|
|
|
total_raw_key_size_ = cfd->current()->total_raw_key_size_;
|
|
|
|
total_raw_value_size_ = cfd->current()->total_raw_value_size_;
|
|
|
|
num_non_deletions_ = cfd->current()->num_non_deletions_;
|
2014-07-28 21:50:16 +00:00
|
|
|
num_deletions_ = cfd->current()->num_deletions_;
|
2014-07-09 19:46:08 +00:00
|
|
|
}
|
2014-04-21 16:10:12 +00:00
|
|
|
}
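
A small sketch of hint (2) from the FindFile-hints change above: once the
target key is known to fall inside one file at level L, only the sub-range of
level L+1 files that can overlap that file needs to be searched. FileRange and
NarrowNextLevel are hypothetical stand-ins for the real FileIndexer machinery:

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

struct FileRange { std::string smallest, largest; };

// Return [lo, hi) index bounds in `next_level` (sorted, non-overlapping)
// for files that may overlap `current`; a later key lookup can binary
// search inside these bounds instead of the whole level.
std::pair<size_t, size_t> NarrowNextLevel(
    const FileRange& current, const std::vector<FileRange>& next_level) {
  auto lo = std::lower_bound(
      next_level.begin(), next_level.end(), current.smallest,
      [](const FileRange& f, const std::string& k) { return f.largest < k; });
  auto hi = std::upper_bound(
      lo, next_level.end(), current.largest,
      [](const std::string& k, const FileRange& f) { return k < f.smallest; });
  return {static_cast<size_t>(lo - next_level.begin()),
          static_cast<size_t>(hi - next_level.begin())};
}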
|
2012-06-23 02:30:03 +00:00
|
|
|
|
2013-03-21 22:59:47 +00:00
|
|
|
void Version::Get(const ReadOptions& options,
|
|
|
|
const LookupKey& k,
|
|
|
|
std::string* value,
|
2013-08-06 03:14:32 +00:00
|
|
|
Status* status,
|
2013-12-03 02:34:05 +00:00
|
|
|
MergeContext* merge_context,
|
2013-07-26 19:57:01 +00:00
|
|
|
bool* value_found) {
|
2011-06-22 02:36:45 +00:00
|
|
|
Slice ikey = k.internal_key();
|
|
|
|
Slice user_key = k.user_key();
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
assert(status->ok() || status->IsMergeInProgress());
|
|
|
|
Saver saver;
|
|
|
|
saver.state = status->ok() ? kNotFound : kMerge;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.ucmp = user_comparator_;
|
2013-03-21 22:59:47 +00:00
|
|
|
saver.user_key = user_key;
|
2013-07-26 19:57:01 +00:00
|
|
|
saver.value_found = value_found;
|
2013-03-21 22:59:47 +00:00
|
|
|
saver.value = value;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.merge_operator = merge_operator_;
|
2013-12-03 02:34:05 +00:00
|
|
|
saver.merge_context = merge_context;
|
2014-04-17 21:07:05 +00:00
|
|
|
saver.logger = info_log_;
|
|
|
|
saver.statistics = db_statistics_;
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-07-16 20:33:02 +00:00
|
|
|
FilePicker fp(files_, user_key, ikey, &file_levels_, num_non_empty_levels_,
|
|
|
|
&file_indexer_, user_comparator_, internal_comparator_);
|
|
|
|
FdWithKeyRange* f = fp.GetNextFile();
|
|
|
|
while (f != nullptr) {
|
|
|
|
*status = table_cache_->Get(options, *internal_comparator_, f->fd, ikey,
|
|
|
|
&saver, SaveValue, MarkKeyMayExist);
|
|
|
|
// TODO: examine the behavior for corrupted key
|
|
|
|
if (!status->ok()) {
|
|
|
|
return;
|
2014-04-21 16:10:12 +00:00
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-07-16 20:33:02 +00:00
|
|
|
switch (saver.state) {
|
|
|
|
case kNotFound:
|
|
|
|
break; // Keep searching in other files
|
|
|
|
case kFound:
|
2013-03-21 22:59:47 +00:00
|
|
|
return;
|
2014-07-16 20:33:02 +00:00
|
|
|
case kDeleted:
|
|
|
|
*status = Status::NotFound(); // Use empty error message for speed
|
|
|
|
return;
|
|
|
|
case kCorrupt:
|
|
|
|
*status = Status::Corruption("corrupted key for ", user_key);
|
|
|
|
return;
|
|
|
|
case kMerge:
|
2014-04-21 16:10:12 +00:00
|
|
|
break;
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
2014-07-16 20:33:02 +00:00
|
|
|
f = fp.GetNextFile();
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
2013-03-21 22:59:47 +00:00
|
|
|
if (kMerge == saver.state) {
|
2014-07-31 00:24:36 +00:00
|
|
|
if (!merge_operator_) {
|
|
|
|
*status = Status::InvalidArgument(
|
|
|
|
"merge_operator is not properly initialized.");
|
|
|
|
return;
|
|
|
|
}
|
2013-08-06 03:14:32 +00:00
|
|
|
// merge operands are in the saver and we hit the beginning of the key history
|
|
|
|
// do a final merge with a nullptr base value and the operands
|
2014-04-17 21:07:05 +00:00
|
|
|
if (merge_operator_->FullMerge(user_key, nullptr,
|
|
|
|
saver.merge_context->GetOperands(), value,
|
|
|
|
info_log_)) {
|
2013-08-06 03:14:32 +00:00
|
|
|
*status = Status::OK();
|
|
|
|
} else {
|
2014-04-17 21:07:05 +00:00
|
|
|
RecordTick(db_statistics_, NUMBER_MERGE_FAILURES);
|
2013-08-06 03:14:32 +00:00
|
|
|
*status = Status::Corruption("could not perform end-of-key merge for ",
|
|
|
|
user_key);
|
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
} else {
|
2013-12-26 21:49:04 +00:00
|
|
|
*status = Status::NotFound(); // Use an empty error message for speed
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
create compressed_levels_ in Version, allocate its space using arena. Make Version::Get, Version::FindFile faster
Summary:
Define CompressedFileMetaData that just contains fd, smallest_slice, largest_slice. Create compressed_levels_ in Version; the space is allocated using an arena.
This increases file metadata locality and speeds up "Get" and "FindFile".
Benchmarked with in-memory tmpfs: about 4% improvement under "random read" and 2% improvement under "read while writing". (A sketch of the packed layout follows the revision info below.)
benchmark command:
./db_bench --db=/mnt/db/rocksdb --num_levels=6 --key_size=20 --prefix_size=20 --keys_per_prefix=0 --value_size=100 --block_size=4096 --cache_size=17179869184 --cache_numshardbits=6 --compression_type=none --compression_ratio=1 --min_level_to_compress=-1 --disable_seek_compaction=1 --hard_rate_limit=2 --write_buffer_size=134217728 --max_write_buffer_number=2 --level0_file_num_compaction_trigger=8 --target_file_size_base=33554432 --max_bytes_for_level_base=1073741824 --disable_wal=0 --sync=0 --disable_data_sync=1 --verify_checksum=1 --delete_obsolete_files_period_micros=314572800 --max_grandparent_overlap_factor=10 --max_background_compactions=4 --max_background_flushes=0 --level0_slowdown_writes_trigger=16 --level0_stop_writes_trigger=24 --statistics=0 --stats_per_interval=0 --stats_interval=1048576 --histogram=0 --use_plain_table=1 --open_files=-1 --mmap_read=1 --mmap_write=0 --memtablerep=prefix_hash --bloom_bits=10 --bloom_locality=1 --perf_level=0 --benchmarks=readwhilewriting,readwhilewriting,readwhilewriting --use_existing_db=1 --num=52428800 --threads=1 --writes_per_second=81920
Read Random:
From 1.8363 ms/op, improved to 1.7587 ms/op.
Read while writing:
From 2.985 ms/op, improved to 2.924 ms/op.
Test Plan:
make all check
Reviewers: ljin, haobo, yhchiang, sdong
Reviewed By: sdong
Subscribers: dhruba, igor
Differential Revision: https://reviews.facebook.net/D19419
2014-07-10 05:14:39 +00:00
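
A sketch of the arena-packed layout the change above describes: each file's fd
and boundary keys are copied into one contiguous arena allocation, so a binary
search walks a flat array instead of chasing FileMetaData pointers. PackedFile
and PackLevel are illustrative stand-ins for FdWithKeyRange /
DoGenerateFileLevel, not the real implementation; non-empty keys are assumed:

#include <cstdint>
#include <cstring>
#include <new>
#include <string>
#include <utility>
#include <vector>
#include "rocksdb/slice.h"
#include "util/arena.h"

// Fixed-size record whose key slices point at bytes owned by the arena.
struct PackedFile {
  uint64_t file_number;
  rocksdb::Slice smallest_key;
  rocksdb::Slice largest_key;
};

// Copy (file_number, smallest key, largest key) triples into the arena so
// the per-level array is contiguous and cache-friendly.
PackedFile* PackLevel(
    const std::vector<std::pair<uint64_t,
                                std::pair<std::string, std::string>>>& in,
    rocksdb::Arena* arena) {
  auto* out = reinterpret_cast<PackedFile*>(
      arena->AllocateAligned(sizeof(PackedFile) * in.size()));
  for (size_t i = 0; i < in.size(); i++) {
    const std::string& lo = in[i].second.first;
    const std::string& hi = in[i].second.second;
    char* lo_mem = arena->Allocate(lo.size());
    char* hi_mem = arena->Allocate(hi.size());
    std::memcpy(lo_mem, lo.data(), lo.size());
    std::memcpy(hi_mem, hi.data(), hi.size());
    new (&out[i]) PackedFile{in[i].first,
                             rocksdb::Slice(lo_mem, lo.size()),
                             rocksdb::Slice(hi_mem, hi.size())};
  }
  return out;
}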
|
|
|
void Version::GenerateFileLevels() {
|
|
|
|
file_levels_.resize(num_non_empty_levels_);
|
|
|
|
for (int level = 0; level < num_non_empty_levels_; level++) {
|
2014-07-11 19:52:41 +00:00
|
|
|
DoGenerateFileLevel(&file_levels_[level], files_[level], &arena_);
|
2014-07-10 05:14:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-13 22:06:10 +00:00
|
|
|
void Version::PrepareApply(std::vector<uint64_t>& size_being_compacted) {
|
2014-07-09 19:46:08 +00:00
|
|
|
UpdateTemporaryStats();
|
2014-06-13 22:06:10 +00:00
|
|
|
ComputeCompactionScore(size_being_compacted);
|
|
|
|
UpdateFilesBySize();
|
|
|
|
UpdateNumNonEmptyLevels();
|
2014-07-16 18:21:30 +00:00
|
|
|
file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
|
2014-07-10 05:14:39 +00:00
|
|
|
GenerateFileLevels();
|
2014-06-13 22:06:10 +00:00
|
|
|
}
|
|
|
|
|
2014-06-24 22:37:06 +00:00
|
|
|
bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
|
|
|
|
if (file_meta->num_entries > 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
std::shared_ptr<const TableProperties> tp;
|
|
|
|
Status s = GetTableProperties(&tp, file_meta);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (tp.get() == nullptr) return false;
|
|
|
|
file_meta->num_entries = tp->num_entries;
|
|
|
|
file_meta->num_deletions = GetDeletedKeys(tp->user_collected_properties);
|
|
|
|
file_meta->raw_value_size = tp->raw_value_size;
|
|
|
|
file_meta->raw_key_size = tp->raw_key_size;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-07-09 19:46:08 +00:00
|
|
|
void Version::UpdateTemporaryStats() {
|
2014-06-24 22:37:06 +00:00
|
|
|
static const int kDeletionWeightOnCompaction = 2;
|
|
|
|
|
|
|
|
// incrementally update the average value size by
|
|
|
|
// including newly added files into the global stats
|
|
|
|
int init_count = 0;
|
|
|
|
int total_count = 0;
|
|
|
|
for (int level = 0; level < num_levels_; level++) {
|
|
|
|
for (auto* file_meta : files_[level]) {
|
|
|
|
if (MaybeInitializeFileMetaData(file_meta)) {
|
|
|
|
// each FileMeta will be initialized only once.
|
|
|
|
total_file_size_ += file_meta->fd.GetFileSize();
|
|
|
|
total_raw_key_size_ += file_meta->raw_key_size;
|
|
|
|
total_raw_value_size_ += file_meta->raw_value_size;
|
|
|
|
num_non_deletions_ +=
|
|
|
|
file_meta->num_entries - file_meta->num_deletions;
|
2014-07-28 21:50:16 +00:00
|
|
|
num_deletions_ += file_meta->num_deletions;
|
2014-06-24 22:37:06 +00:00
|
|
|
init_count++;
|
|
|
|
}
|
|
|
|
total_count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t average_value_size = GetAverageValueSize();
|
|
|
|
|
|
|
|
// compute the compensated size
|
|
|
|
for (int level = 0; level < num_levels_; level++) {
|
|
|
|
for (auto* file_meta : files_[level]) {
|
2014-07-09 19:46:08 +00:00
|
|
|
// Here we only compute compensated_file_size for those file_metas
|
|
|
|
// whose compensated_file_size is uninitialized (== 0).
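// Illustrative example: a 10 MB file with 1000 deletions and an average
// value size of 1 KB is compensated to 10 MB + 1000 * 1 KB * 2 = 12 MB,
// so deletion-heavy files rank higher for compaction.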
|
|
|
|
if (file_meta->compensated_file_size == 0) {
|
|
|
|
file_meta->compensated_file_size = file_meta->fd.GetFileSize() +
|
|
|
|
file_meta->num_deletions * average_value_size *
|
|
|
|
kDeletionWeightOnCompaction;
|
|
|
|
}
|
2014-06-24 22:37:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-19 23:52:26 +00:00
|
|
|
void Version::ComputeCompactionScore(
|
|
|
|
std::vector<uint64_t>& size_being_compacted) {
|
2014-01-16 00:23:36 +00:00
|
|
|
double max_score = 0;
|
|
|
|
int max_score_level = 0;
|
|
|
|
|
2014-07-18 01:01:04 +00:00
|
|
|
int max_input_level =
|
|
|
|
cfd_->compaction_picker()->MaxInputLevel(NumberLevels());
|
2014-01-16 00:23:36 +00:00
|
|
|
|
2014-07-18 01:01:04 +00:00
|
|
|
for (int level = 0; level <= max_input_level; level++) {
|
2014-01-16 00:23:36 +00:00
|
|
|
double score;
|
|
|
|
if (level == 0) {
|
|
|
|
// We treat level-0 specially by bounding the number of files
|
|
|
|
// instead of number of bytes for two reasons:
|
|
|
|
//
|
|
|
|
// (1) With larger write-buffer sizes, it is nice not to do too
|
|
|
|
// many level-0 compactions.
|
|
|
|
//
|
|
|
|
// (2) The files in level-0 are merged on every read and
|
|
|
|
// therefore we wish to avoid too many files when the individual
|
|
|
|
// file size is small (perhaps because of a small write-buffer
|
|
|
|
// setting, or very high compression ratios, or lots of
|
|
|
|
// overwrites/deletions).
|
|
|
|
int numfiles = 0;
|
2014-05-21 18:43:35 +00:00
|
|
|
uint64_t total_size = 0;
|
2014-01-16 00:23:36 +00:00
|
|
|
for (unsigned int i = 0; i < files_[level].size(); i++) {
|
|
|
|
if (!files_[level][i]->being_compacted) {
|
2014-06-24 22:37:06 +00:00
|
|
|
total_size += files_[level][i]->compensated_file_size;
|
2014-01-16 00:23:36 +00:00
|
|
|
numfiles++;
|
|
|
|
}
|
|
|
|
}
|
2014-05-21 18:43:35 +00:00
|
|
|
if (cfd_->options()->compaction_style == kCompactionStyleFIFO) {
|
|
|
|
score = static_cast<double>(total_size) /
|
|
|
|
cfd_->options()->compaction_options_fifo.max_table_files_size;
|
|
|
|
} else if (numfiles >= cfd_->options()->level0_stop_writes_trigger) {
|
|
|
|
// If we are slowing down writes, then we better compact that first
|
2014-01-16 00:23:36 +00:00
|
|
|
score = 1000000;
|
2014-01-31 23:30:27 +00:00
|
|
|
} else if (numfiles >= cfd_->options()->level0_slowdown_writes_trigger) {
|
2014-01-16 00:23:36 +00:00
|
|
|
score = 10000;
|
|
|
|
} else {
|
|
|
|
score = static_cast<double>(numfiles) /
|
2014-01-31 23:30:27 +00:00
|
|
|
cfd_->options()->level0_file_num_compaction_trigger;
|
2014-01-16 00:23:36 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Compute the ratio of current size to size limit.
|
|
|
|
const uint64_t level_bytes =
|
2014-06-24 22:37:06 +00:00
|
|
|
TotalCompensatedFileSize(files_[level]) - size_being_compacted[level];
|
2014-01-31 23:30:27 +00:00
|
|
|
score = static_cast<double>(level_bytes) /
|
|
|
|
cfd_->compaction_picker()->MaxBytesForLevel(level);
|
2014-01-16 00:23:36 +00:00
|
|
|
if (max_score < score) {
|
|
|
|
max_score = score;
|
|
|
|
max_score_level = level;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
compaction_level_[level] = level;
|
|
|
|
compaction_score_[level] = score;
|
|
|
|
}
|
|
|
|
|
|
|
|
// update the max compaction score in levels 1 to n-1
|
|
|
|
max_compaction_score_ = max_score;
|
|
|
|
max_compaction_score_level_ = max_score_level;
|
|
|
|
|
|
|
|
// sort all the levels based on their score. Higher scores get listed
|
|
|
|
// first. Use bubble sort because the number of entries is small.
|
|
|
|
for (int i = 0; i < NumberLevels() - 2; i++) {
|
|
|
|
for (int j = i + 1; j < NumberLevels() - 1; j++) {
|
|
|
|
if (compaction_score_[i] < compaction_score_[j]) {
|
|
|
|
double score = compaction_score_[i];
|
|
|
|
int level = compaction_level_[i];
|
|
|
|
compaction_score_[i] = compaction_score_[j];
|
|
|
|
compaction_level_[i] = compaction_level_[j];
|
|
|
|
compaction_score_[j] = score;
|
|
|
|
compaction_level_[j] = level;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// Comparator that is used to sort files based on their size
|
|
|
|
// In normal mode: descending size
|
2014-06-24 22:37:06 +00:00
|
|
|
bool CompareCompensatedSizeDescending(const Version::Fsize& first,
|
|
|
|
const Version::Fsize& second) {
|
|
|
|
return (first.file->compensated_file_size >
|
|
|
|
second.file->compensated_file_size);
|
2014-01-16 00:23:36 +00:00
|
|
|
}
|
2014-01-16 07:12:31 +00:00
|
|
|
} // anonymous namespace
|
2014-01-16 00:23:36 +00:00
|
|
|
|
2014-06-13 22:06:10 +00:00
|
|
|
void Version::UpdateNumNonEmptyLevels() {
|
|
|
|
num_non_empty_levels_ = num_levels_;
|
|
|
|
for (int i = num_levels_ - 1; i >= 0; i--) {
|
|
|
|
if (files_[i].size() != 0) {
|
|
|
|
return;
|
|
|
|
} else {
|
|
|
|
num_non_empty_levels_ = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-16 00:23:36 +00:00
|
|
|
void Version::UpdateFilesBySize() {
|
2014-07-01 06:55:04 +00:00
|
|
|
if (cfd_->options()->compaction_style == kCompactionStyleFIFO ||
|
|
|
|
cfd_->options()->compaction_style == kCompactionStyleUniversal) {
|
2014-05-21 18:43:35 +00:00
|
|
|
// files_by_size_ is not needed for FIFO or universal compaction
|
|
|
|
return;
|
|
|
|
}
|
2014-01-16 00:23:36 +00:00
|
|
|
// No need to sort the highest level because it is never compacted.
|
2014-07-01 06:55:04 +00:00
|
|
|
for (int level = 0; level < NumberLevels() - 1; level++) {
|
2014-01-16 00:23:36 +00:00
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
2014-07-01 06:55:04 +00:00
|
|
|
auto& files_by_size = files_by_size_[level];
|
2014-01-16 00:23:36 +00:00
|
|
|
assert(files_by_size.size() == 0);
|
|
|
|
|
|
|
|
// populate a temp vector for sorting based on size
|
|
|
|
std::vector<Fsize> temp(files.size());
|
|
|
|
for (unsigned int i = 0; i < files.size(); i++) {
|
|
|
|
temp[i].index = i;
|
|
|
|
temp[i].file = files[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
// sort the top number_of_files_to_sort_ based on file size
|
2014-07-01 06:55:04 +00:00
|
|
|
size_t num = Version::number_of_files_to_sort_;
|
|
|
|
if (num > temp.size()) {
|
|
|
|
num = temp.size();
|
2014-01-16 00:23:36 +00:00
|
|
|
}
|
2014-07-01 06:55:04 +00:00
|
|
|
std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
|
|
|
|
CompareCompensatedSizeDescending);
|
2014-01-16 00:23:36 +00:00
|
|
|
assert(temp.size() == files.size());
|
|
|
|
|
|
|
|
// initialize files_by_size_
|
|
|
|
for (unsigned int i = 0; i < temp.size(); i++) {
|
|
|
|
files_by_size.push_back(temp[i].index);
|
|
|
|
}
|
|
|
|
next_file_to_compact_by_size_[level] = 0;
|
|
|
|
assert(files_[level].size() == files_by_size_[level].size());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
void Version::Ref() {
|
|
|
|
++refs_;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:56:36 +00:00
|
|
|
bool Version::Unref() {
|
2011-03-18 22:37:00 +00:00
|
|
|
assert(refs_ >= 1);
|
|
|
|
--refs_;
|
|
|
|
if (refs_ == 0) {
|
2011-05-21 02:17:43 +00:00
|
|
|
delete this;
|
2013-12-11 19:56:36 +00:00
|
|
|
return true;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2013-12-11 19:56:36 +00:00
|
|
|
return false;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-27 17:59:00 +00:00
|
|
|
bool Version::NeedsCompaction() const {
|
|
|
|
// In the universal compaction case, this check doesn't really
|
|
|
|
// check the compaction condition, but only checks the number-of-files
|
|
|
|
// threshold. We are not going to miss any compaction opportunity,
|
|
|
|
// but it's likely that more compactions will be scheduled than
|
|
|
|
// end up with anything to do. We can improve it later.
|
|
|
|
// TODO(sdong): improve this function to be accurate for universal
|
|
|
|
// compactions.
|
2014-07-18 01:01:04 +00:00
|
|
|
int max_input_level =
|
|
|
|
cfd_->compaction_picker()->MaxInputLevel(NumberLevels());
|
|
|
|
|
|
|
|
for (int i = 0; i <= max_input_level; i++) {
|
2014-01-27 17:59:00 +00:00
|
|
|
if (compaction_score_[i] >= 1) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
bool Version::OverlapInLevel(int level,
|
2011-10-05 23:30:28 +00:00
|
|
|
const Slice* smallest_user_key,
|
|
|
|
const Slice* largest_user_key) {
|
2014-01-31 23:30:27 +00:00
|
|
|
return SomeFileOverlapsRange(cfd_->internal_comparator(), (level > 0),
|
2014-07-10 05:14:39 +00:00
|
|
|
file_levels_[level], smallest_user_key,
|
2014-01-31 23:30:27 +00:00
|
|
|
largest_user_key);
|
2011-10-05 23:30:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int Version::PickLevelForMemTableOutput(
|
|
|
|
const Slice& smallest_user_key,
|
|
|
|
const Slice& largest_user_key) {
|
|
|
|
int level = 0;
|
|
|
|
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
|
|
|
|
// Push to next level if there is no overlap in next level,
|
|
|
|
// and the #bytes overlapping in the level after that are limited.
|
|
|
|
InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
|
|
|
|
InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
|
|
|
|
std::vector<FileMetaData*> overlaps;
|
2014-01-31 23:30:27 +00:00
|
|
|
int max_mem_compact_level = cfd_->options()->max_mem_compaction_level;
|
2012-06-23 02:30:03 +00:00
|
|
|
while (max_mem_compact_level > 0 && level < max_mem_compact_level) {
|
2011-10-05 23:30:28 +00:00
|
|
|
if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
|
|
|
|
break;
|
|
|
|
}
|
2014-01-16 00:15:43 +00:00
|
|
|
if (level + 2 >= num_levels_) {
|
2012-10-31 18:47:18 +00:00
|
|
|
level++;
|
|
|
|
break;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
2011-10-05 23:30:28 +00:00
|
|
|
GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
|
2013-07-17 20:56:24 +00:00
|
|
|
const uint64_t sum = TotalFileSize(overlaps);
|
2014-01-31 23:30:27 +00:00
|
|
|
if (sum > cfd_->compaction_picker()->MaxGrandParentOverlapBytes(level)) {
|
2011-10-05 23:30:28 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
level++;
|
|
|
|
}
|
|
|
|
}
|
2012-06-23 02:30:03 +00:00
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
return level;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
2012-11-29 00:42:36 +00:00
|
|
|
// If hint_index is specified, then it points to a file in the
|
2012-11-06 17:06:16 +00:00
|
|
|
// overlapping range.
|
|
|
|
// If file_index is non-null, it is set to the index of a file in the overlapping range.
|
2014-01-10 23:12:34 +00:00
|
|
|
void Version::GetOverlappingInputs(int level,
|
|
|
|
const InternalKey* begin,
|
|
|
|
const InternalKey* end,
|
|
|
|
std::vector<FileMetaData*>* inputs,
|
|
|
|
int hint_index,
|
|
|
|
int* file_index) {
|
2011-10-05 23:30:28 +00:00
|
|
|
inputs->clear();
|
|
|
|
Slice user_begin, user_end;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr) {
|
2011-10-05 23:30:28 +00:00
|
|
|
user_begin = begin->user_key();
|
|
|
|
}
|
2013-03-01 02:04:58 +00:00
|
|
|
if (end != nullptr) {
|
2011-10-05 23:30:28 +00:00
|
|
|
user_end = end->user_key();
|
|
|
|
}
|
Assertion failure while running with unit tests with OPT=-g
Summary:
When we expand the range of keys for a level 0 compaction, we
need to invoke ParentFilesInCompaction() only once for the
entire range of keys that is being compacted. We were invoking
it for each file that was being compacted, but this triggers
an assertion because the files' ranges were contiguous but
non-overlapping.
I renamed ParentFilesInCompaction to ParentRangeInCompaction
to adequately represent that it is the range of keys and
not individual files that we compact in a single compaction run.
Here is the assertion that is fixed by this patch.
db_test: db/version_set.cc:585: void leveldb::Version::ExtendOverlappingInputs(int, const leveldb::Slice&, const leveldb::Slice&, std::vector<leveldb::FileMetaData*, std::allocator<leveldb::FileMetaData*> >*, int): Assertion `user_cmp->Compare(flimit, user_begin) >= 0' failed.
Test Plan: make clean check OPT=-g
Reviewers: sheki
Reviewed By: sheki
CC: MarkCallaghan, emayanke, leveldb
Differential Revision: https://reviews.facebook.net/D6963
2012-11-26 09:49:50 +00:00
|
|
|
if (file_index) {
|
|
|
|
*file_index = -1;
|
|
|
|
}
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && end != nullptr && level > 0) {
|
2012-11-06 17:06:16 +00:00
|
|
|
GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs,
|
|
|
|
hint_index, file_index);
|
2012-11-05 07:47:06 +00:00
|
|
|
return;
|
|
|
|
}
|
2014-07-10 05:14:39 +00:00
|
|
|
for (size_t i = 0; i < file_levels_[level].num_files; ) {
|
|
|
|
FdWithKeyRange* f = &(file_levels_[level].files[i++]);
|
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
|
2011-10-05 23:30:28 +00:00
|
|
|
// "f" is completely before specified range; skip it
|
2013-03-01 02:04:58 +00:00
|
|
|
} else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
|
2011-10-05 23:30:28 +00:00
|
|
|
// "f" is completely after specified range; skip it
|
|
|
|
} else {
|
2014-07-10 05:14:39 +00:00
|
|
|
inputs->push_back(files_[level][i-1]);
|
2011-10-31 17:22:06 +00:00
|
|
|
if (level == 0) {
|
|
|
|
// Level-0 files may overlap each other. So check if the newly
|
|
|
|
// added file has expanded the range. If so, restart search.
|
2013-03-01 02:04:58 +00:00
|
|
|
if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
|
2011-10-31 17:22:06 +00:00
|
|
|
user_begin = file_start;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
2013-03-01 02:04:58 +00:00
|
|
|
} else if (end != nullptr
|
|
|
|
&& user_cmp->Compare(file_limit, user_end) > 0) {
|
2011-10-31 17:22:06 +00:00
|
|
|
user_end = file_limit;
|
|
|
|
inputs->clear();
|
|
|
|
i = 0;
|
|
|
|
}
|
2012-11-06 17:06:16 +00:00
|
|
|
} else if (file_index) {
|
2012-11-26 09:49:50 +00:00
|
|
|
*file_index = i-1;
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
2011-10-05 23:30:28 +00:00
|
|
|
}
|
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// Employ binary search to find at least one file that overlaps the
|
|
|
|
// specified range. From that file, iterate backwards and
|
|
|
|
// forwards to find all overlapping files.
|
|
|
|
void Version::GetOverlappingInputsBinarySearch(
|
|
|
|
int level,
|
|
|
|
const Slice& user_begin,
|
|
|
|
const Slice& user_end,
|
2012-11-06 17:06:16 +00:00
|
|
|
std::vector<FileMetaData*>* inputs,
|
|
|
|
int hint_index,
|
|
|
|
int* file_index) {
|
2012-11-05 07:47:06 +00:00
|
|
|
assert(level > 0);
|
|
|
|
int min = 0;
|
|
|
|
int mid = 0;
|
|
|
|
int max = files_[level].size() - 1;
|
|
|
|
bool foundOverlap = false;
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2012-11-06 17:06:16 +00:00
|
|
|
|
|
|
|
// if the caller already knows the index of a file that has overlap,
|
|
|
|
// then we can skip the binary search.
|
|
|
|
if (hint_index != -1) {
|
|
|
|
mid = hint_index;
|
|
|
|
foundOverlap = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!foundOverlap && min <= max) {
|
2012-11-05 07:47:06 +00:00
|
|
|
mid = (min + max)/2;
|
2014-07-10 05:14:39 +00:00
|
|
|
FdWithKeyRange* f = &(file_levels_[level].files[mid]);
|
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
2012-11-05 07:47:06 +00:00
|
|
|
if (user_cmp->Compare(file_limit, user_begin) < 0) {
|
|
|
|
min = mid + 1;
|
|
|
|
} else if (user_cmp->Compare(user_end, file_start) < 0) {
|
|
|
|
max = mid - 1;
|
|
|
|
} else {
|
|
|
|
foundOverlap = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// If there were no overlapping files, return immediately.
|
|
|
|
if (!foundOverlap) {
|
|
|
|
return;
|
|
|
|
}
|
2012-11-06 17:06:16 +00:00
|
|
|
// returns the index where an overlap is found
|
|
|
|
if (file_index) {
|
|
|
|
*file_index = mid;
|
|
|
|
}
|
2012-11-05 07:47:06 +00:00
|
|
|
ExtendOverlappingInputs(level, user_begin, user_end, inputs, mid);
|
|
|
|
}
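The FdWithKeyRange entries searched above live in the flat, arena-allocated file_levels_ arrays introduced by the compressed_levels_ change (D19419). As a rough illustration of why that layout helps, here is a minimal standalone sketch; FileRangeEntry and FindFileSketch are invented names, and std::string stands in for the arena-backed key slices of the real code.

// Minimal sketch, not the committed code: packing each file's key range into
// one contiguous array lets the binary search touch adjacent memory instead
// of dereferencing a separate heap-allocated FileMetaData per comparison.
#include <cstdint>
#include <string>
#include <vector>

struct FileRangeEntry {          // illustrative stand-in for FdWithKeyRange
  uint64_t file_number;
  std::string smallest_key;      // arena-backed Slices in the real code
  std::string largest_key;
};

// Returns the index of the first file whose largest_key is >= key, the same
// lower-bound shape as the binary search in GetOverlappingInputsBinarySearch.
size_t FindFileSketch(const std::vector<FileRangeEntry>& files,
                      const std::string& key) {
  size_t left = 0;
  size_t right = files.size();
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (files[mid].largest_key < key) {
      left = mid + 1;            // files at or before mid end below key
    } else {
      right = mid;               // mid may still contain key
    }
  }
  return right;
}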
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2012-11-05 07:47:06 +00:00
|
|
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
|
|
|
// The midIndex specifies the index of at least one file that
|
|
|
|
// overlaps the specified range. From that file, iterate backward
|
|
|
|
// and forward to find all overlapping files.
|
2014-07-11 19:52:41 +00:00
|
|
|
// Use FileLevel in searching, which makes it faster
|
2012-11-05 07:47:06 +00:00
|
|
|
void Version::ExtendOverlappingInputs(
|
|
|
|
int level,
|
|
|
|
const Slice& user_begin,
|
|
|
|
const Slice& user_end,
|
|
|
|
std::vector<FileMetaData*>* inputs,
|
2013-03-15 01:32:01 +00:00
|
|
|
unsigned int midIndex) {
|
2012-11-05 07:47:06 +00:00
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2014-07-10 05:14:39 +00:00
|
|
|
const FdWithKeyRange* files = file_levels_[level].files;
|
2012-11-06 17:06:16 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
{
|
|
|
|
// assert that the file at midIndex overlaps with the range
|
2014-07-10 05:14:39 +00:00
|
|
|
assert(midIndex < file_levels_[level].num_files);
|
|
|
|
const FdWithKeyRange* f = &files[midIndex];
|
|
|
|
const Slice fstart = ExtractUserKey(f->smallest_key);
|
|
|
|
const Slice flimit = ExtractUserKey(f->largest_key);
|
2012-11-06 17:06:16 +00:00
|
|
|
if (user_cmp->Compare(fstart, user_begin) >= 0) {
|
|
|
|
assert(user_cmp->Compare(fstart, user_end) <= 0);
|
|
|
|
} else {
|
|
|
|
assert(user_cmp->Compare(flimit, user_begin) >= 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2012-12-31 06:18:52 +00:00
|
|
|
int startIndex = midIndex + 1;
|
|
|
|
int endIndex = midIndex;
|
2013-01-14 20:39:24 +00:00
|
|
|
int count __attribute__((unused)) = 0;
|
2012-11-05 07:47:06 +00:00
|
|
|
|
|
|
|
// check backwards from 'mid' to lower indices
|
2012-12-31 06:18:52 +00:00
|
|
|
for (int i = midIndex; i >= 0; i--) {
|
2014-07-10 05:14:39 +00:00
|
|
|
const FdWithKeyRange* f = &files[i];
|
|
|
|
const Slice file_limit = ExtractUserKey(f->largest_key);
|
2012-11-05 07:47:06 +00:00
|
|
|
if (user_cmp->Compare(file_limit, user_begin) >= 0) {
|
2012-12-31 06:18:52 +00:00
|
|
|
startIndex = i;
|
|
|
|
assert((count++, true));
|
2012-11-05 07:47:06 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// check forward from 'mid+1' to higher indices
|
2014-07-10 05:14:39 +00:00
|
|
|
for (unsigned int i = midIndex+1; i < file_levels_[level].num_files; i++) {
|
|
|
|
const FdWithKeyRange* f = &files[i];
|
|
|
|
const Slice file_start = ExtractUserKey(f->smallest_key);
|
2012-11-05 07:47:06 +00:00
|
|
|
if (user_cmp->Compare(file_start, user_end) <= 0) {
|
2012-12-31 06:18:52 +00:00
|
|
|
assert((count++, true));
|
|
|
|
endIndex = i;
|
2012-11-05 07:47:06 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-12-31 06:18:52 +00:00
|
|
|
assert(count == endIndex - startIndex + 1);
|
|
|
|
|
|
|
|
// insert overlapping files into vector
|
|
|
|
for (int i = startIndex; i <= endIndex; i++) {
|
|
|
|
FileMetaData* f = files_[level][i];
|
2013-01-08 20:00:13 +00:00
|
|
|
inputs->push_back(f);
|
2012-12-31 06:18:52 +00:00
|
|
|
}
|
2012-11-05 07:47:06 +00:00
|
|
|
}
|
|
|
|
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
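A minimal standalone sketch of the stacking strategy described above; the Entry type, the std::function merge signature, and ResolveMerge are illustrative stand-ins rather than the real MergeOperator/MergeContext API. Operands are collected newest-to-oldest while scanning a key's history, and the merge is applied once a Put, a Delete, or end-of-history supplies (or rules out) a base value.

// Simplified sketch of merge-operand stacking; not the real MergeHelper code.
#include <functional>
#include <string>
#include <vector>

enum class EntryType { kPut, kDelete, kMerge };
struct Entry {
  EntryType type;
  std::string value;  // Put value or merge operand; unused for Delete
};

using FullMerge = std::function<std::string(
    const std::string* base,                // nullptr if no Put was found
    const std::vector<std::string>& ops)>;  // operands, oldest first

// history is ordered newest -> oldest, as it is seen during Get()/compaction.
std::string ResolveMerge(const std::vector<Entry>& history,
                         const FullMerge& merge) {
  std::vector<std::string> stacked;  // operands, newest first
  for (const Entry& e : history) {
    if (e.type == EntryType::kMerge) {
      stacked.push_back(e.value);    // keep stacking until a base is found
      continue;
    }
    // Put supplies a base value; Delete (like end-of-history) supplies none.
    const std::string* base = (e.type == EntryType::kPut) ? &e.value : nullptr;
    std::vector<std::string> oldest_first(stacked.rbegin(), stacked.rend());
    return merge(base, oldest_first);
  }
  std::vector<std::string> oldest_first(stacked.rbegin(), stacked.rend());
  return merge(nullptr, oldest_first);  // end of history: no base value
}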
|
|
|
// Returns true iff the first or last file in inputs contains
|
|
|
|
// an overlapping user key to the file "just outside" of it (i.e.
|
|
|
|
// just after the last file, or just before the first file)
|
|
|
|
// REQUIRES: "*inputs" is a sorted list of non-overlapping files
|
|
|
|
bool Version::HasOverlappingUserKey(
|
|
|
|
const std::vector<FileMetaData*>* inputs,
|
|
|
|
int level) {
|
|
|
|
|
|
|
|
// If inputs empty, there is no overlap.
|
|
|
|
// If level == 0, it is assumed that all needed files were already included.
|
|
|
|
if (inputs->empty() || level == 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
const Comparator* user_cmp = cfd_->internal_comparator().user_comparator();
|
2014-07-10 05:14:39 +00:00
|
|
|
const FileLevel& file_level = file_levels_[level];
|
|
|
|
const FdWithKeyRange* files = file_levels_[level].files;
|
|
|
|
const size_t kNumFiles = file_level.num_files;
|
2013-08-06 03:14:32 +00:00
|
|
|
|
|
|
|
// Check the last file in inputs against the file after it
|
2014-07-10 05:14:39 +00:00
|
|
|
size_t last_file = FindFile(cfd_->internal_comparator(), file_level,
|
2013-08-06 03:14:32 +00:00
|
|
|
inputs->back()->largest.Encode());
|
|
|
|
assert(0 <= last_file && last_file < kNumFiles); // File should exist!
|
|
|
|
if (last_file < kNumFiles-1) { // If not the last file
|
2014-07-10 05:14:39 +00:00
|
|
|
const Slice last_key_in_input = ExtractUserKey(
|
|
|
|
files[last_file].largest_key);
|
|
|
|
const Slice first_key_after = ExtractUserKey(
|
|
|
|
files[last_file+1].smallest_key);
|
2013-08-06 03:14:32 +00:00
|
|
|
if (user_cmp->Compare(last_key_in_input, first_key_after) == 0) {
|
|
|
|
// The last user key in input overlaps with the next file's first key
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the first file in inputs against the file just before it
|
2014-07-10 05:14:39 +00:00
|
|
|
size_t first_file = FindFile(cfd_->internal_comparator(), file_level,
|
2013-08-06 03:14:32 +00:00
|
|
|
inputs->front()->smallest.Encode());
|
|
|
|
assert(0 <= first_file && first_file <= last_file); // File should exist!
|
|
|
|
if (first_file > 0) { // If not first file
|
2014-07-10 05:14:39 +00:00
|
|
|
const Slice& first_key_in_input = ExtractUserKey(
|
|
|
|
files[first_file].smallest_key);
|
|
|
|
const Slice& last_key_before = ExtractUserKey(
|
|
|
|
files[first_file-1].largest_key);
|
2013-08-06 03:14:32 +00:00
|
|
|
if (user_cmp->Compare(first_key_in_input, last_key_before) == 0) {
|
|
|
|
// The first user key in input overlaps with the previous file's last key
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
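To make the boundary condition concrete: the same user key can end one file and begin the next (with different sequence numbers), and compacting only one of those files would split that key's history across levels. A minimal standalone sketch of the check, using an invented FileRange type keyed on user keys only:

// Illustrative sketch of the boundary-key check performed above.
#include <cstddef>
#include <string>
#include <vector>

struct FileRange {
  std::string smallest_user_key;
  std::string largest_user_key;
};

// level_files is sorted and non-overlapping; [first, last] are the chosen
// compaction inputs. Mirrors the two FindFile()-based checks above.
bool BoundaryOverlap(const std::vector<FileRange>& level_files,
                     size_t first, size_t last) {
  if (last + 1 < level_files.size() &&
      level_files[last].largest_user_key ==
          level_files[last + 1].smallest_user_key) {
    return true;  // last input shares its boundary user key with the next file
  }
  if (first > 0 &&
      level_files[first].smallest_user_key ==
          level_files[first - 1].largest_user_key) {
    return true;  // first input shares its boundary key with the previous file
  }
  return false;
}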
|
|
|
|
|
2014-01-16 00:18:04 +00:00
|
|
|
int64_t Version::NumLevelBytes(int level) const {
|
|
|
|
assert(level >= 0);
|
|
|
|
assert(level < NumberLevels());
|
|
|
|
return TotalFileSize(files_[level]);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char* Version::LevelSummary(LevelSummaryStorage* scratch) const {
|
|
|
|
int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files[");
|
|
|
|
for (int i = 0; i < NumberLevels(); i++) {
|
|
|
|
int sz = sizeof(scratch->buffer) - len;
|
|
|
|
int ret = snprintf(scratch->buffer + len, sz, "%d ", int(files_[i].size()));
|
|
|
|
if (ret < 0 || ret >= sz) break;
|
|
|
|
len += ret;
|
|
|
|
}
|
2014-05-14 19:13:50 +00:00
|
|
|
if (len > 0) {
|
|
|
|
// overwrite the last space
|
|
|
|
--len;
|
|
|
|
}
|
2014-01-16 00:18:04 +00:00
|
|
|
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "]");
|
|
|
|
return scratch->buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char* Version::LevelFileSummary(FileSummaryStorage* scratch,
|
|
|
|
int level) const {
|
|
|
|
int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
|
|
|
|
for (const auto& f : files_[level]) {
|
|
|
|
int sz = sizeof(scratch->buffer) - len;
|
2014-05-14 19:13:50 +00:00
|
|
|
char sztxt[16];
|
2014-06-13 22:54:19 +00:00
|
|
|
AppendHumanBytes(f->fd.GetFileSize(), sztxt, 16);
|
2014-01-16 00:18:04 +00:00
|
|
|
int ret = snprintf(scratch->buffer + len, sz,
|
2014-06-13 22:54:19 +00:00
|
|
|
"#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ",
|
|
|
|
f->fd.GetNumber(), f->smallest_seqno, sztxt,
|
2014-05-14 19:13:50 +00:00
|
|
|
static_cast<int>(f->being_compacted));
|
2014-01-16 00:18:04 +00:00
|
|
|
if (ret < 0 || ret >= sz)
|
|
|
|
break;
|
|
|
|
len += ret;
|
|
|
|
}
|
2014-05-14 19:13:50 +00:00
|
|
|
// overwrite the last space (only if files_[level].size() is non-zero)
|
|
|
|
if (files_[level].size() && len > 0) {
|
|
|
|
--len;
|
|
|
|
}
|
2014-01-16 00:18:04 +00:00
|
|
|
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "]");
|
|
|
|
return scratch->buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
int64_t Version::MaxNextLevelOverlappingBytes() {
|
|
|
|
uint64_t result = 0;
|
|
|
|
std::vector<FileMetaData*> overlaps;
|
|
|
|
for (int level = 1; level < NumberLevels() - 1; level++) {
|
|
|
|
for (const auto& f : files_[level]) {
|
|
|
|
GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps);
|
|
|
|
const uint64_t sum = TotalFileSize(overlaps);
|
|
|
|
if (sum > result) {
|
|
|
|
result = sum;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-07-02 16:54:20 +00:00
|
|
|
void Version::AddLiveFiles(std::vector<FileDescriptor>* live) {
|
2014-01-16 00:18:04 +00:00
|
|
|
for (int level = 0; level < NumberLevels(); level++) {
|
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
|
|
|
for (const auto& file : files) {
|
2014-07-02 16:54:20 +00:00
|
|
|
live->push_back(file->fd);
|
2014-01-16 00:18:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-16 02:28:36 +00:00
|
|
|
std::string Version::DebugString(bool hex) const {
|
2011-03-18 22:37:00 +00:00
|
|
|
std::string r;
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < num_levels_; level++) {
|
2011-06-22 02:36:45 +00:00
|
|
|
// E.g.,
|
|
|
|
// --- level 1 ---
|
|
|
|
// 17:123['a' .. 'd']
|
|
|
|
// 20:43['e' .. 'g']
|
|
|
|
r.append("--- level ");
|
2011-03-18 22:37:00 +00:00
|
|
|
AppendNumberTo(&r, level);
|
2012-10-19 21:00:53 +00:00
|
|
|
r.append(" --- version# ");
|
|
|
|
AppendNumberTo(&r, version_number_);
|
2011-06-22 02:36:45 +00:00
|
|
|
r.append(" ---\n");
|
2011-03-18 22:37:00 +00:00
|
|
|
const std::vector<FileMetaData*>& files = files_[level];
|
2011-04-20 22:48:11 +00:00
|
|
|
for (size_t i = 0; i < files.size(); i++) {
|
2011-03-18 22:37:00 +00:00
|
|
|
r.push_back(' ');
|
2014-06-13 22:54:19 +00:00
|
|
|
AppendNumberTo(&r, files[i]->fd.GetNumber());
|
2011-03-18 22:37:00 +00:00
|
|
|
r.push_back(':');
|
2014-06-13 22:54:19 +00:00
|
|
|
AppendNumberTo(&r, files[i]->fd.GetFileSize());
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append("[");
|
2012-12-16 02:28:36 +00:00
|
|
|
r.append(files[i]->smallest.DebugString(hex));
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append(" .. ");
|
2012-12-16 02:28:36 +00:00
|
|
|
r.append(files[i]->largest.DebugString(hex));
|
2011-10-05 23:30:28 +00:00
|
|
|
r.append("]\n");
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// this is used to batch writes to the manifest file
|
|
|
|
struct VersionSet::ManifestWriter {
|
|
|
|
Status status;
|
|
|
|
bool done;
|
|
|
|
port::CondVar cv;
|
2014-01-31 01:48:42 +00:00
|
|
|
ColumnFamilyData* cfd;
|
2012-10-19 21:00:53 +00:00
|
|
|
VersionEdit* edit;
|
2012-11-29 00:42:36 +00:00
|
|
|
|
2014-01-31 01:48:42 +00:00
|
|
|
explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* cfd,
|
|
|
|
VersionEdit* e)
|
|
|
|
: done(false), cv(mu), cfd(cfd), edit(e) {}
|
2012-10-19 21:00:53 +00:00
|
|
|
};
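The done/cv pair above supports a leader-follower batching pattern: the first queued writer performs one manifest append on behalf of everyone waiting behind it. The sketch below is a generic illustration of that pattern under an external mutex, with invented names; it is not the actual LogAndApply control flow.

// Generic leader/follower batching sketch; invented names, not RocksDB code.
#include <condition_variable>
#include <deque>
#include <mutex>

struct PendingWrite {
  bool done = false;
  std::condition_variable cv;
};

class WriteBatcher {
 public:
  // Caller holds `lock` on the shared mutex, much as LogAndApply is called
  // with the DB mutex held.
  void Write(std::unique_lock<std::mutex>& lock) {
    PendingWrite self;
    queue_.push_back(&self);
    while (!self.done && queue_.front() != &self) {
      self.cv.wait(lock);     // follower: sleep until a leader processes us
    }
    if (self.done) return;    // a leader already wrote on our behalf
    // Leader: append every queued edit in one manifest write (elided here),
    // then mark all writers done and wake the followers.
    for (PendingWrite* w : queue_) {
      w->done = true;
      if (w != &self) w->cv.notify_one();
    }
    queue_.clear();
  }

 private:
  std::deque<PendingWrite*> queue_;
};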
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// A helper class so we can efficiently apply a whole sequence
|
|
|
|
// of edits to a particular state without creating intermediate
|
|
|
|
// Versions that contain full copies of the intermediate state.
|
|
|
|
class VersionSet::Builder {
|
|
|
|
private:
|
2014-03-26 20:30:14 +00:00
|
|
|
// Helper to sort v->files_
|
2014-06-20 07:12:14 +00:00
|
|
|
// kLevel0 -- NewestFirstBySeqNo
|
2014-03-26 20:30:14 +00:00
|
|
|
// kLevelNon0 -- BySmallestKey
|
|
|
|
struct FileComparator {
|
|
|
|
enum SortMethod {
|
2014-06-20 07:12:14 +00:00
|
|
|
kLevel0 = 0,
|
|
|
|
kLevelNon0 = 1,
|
2014-03-26 20:30:14 +00:00
|
|
|
} sort_method;
|
2011-05-21 02:17:43 +00:00
|
|
|
const InternalKeyComparator* internal_comparator;
|
|
|
|
|
|
|
|
bool operator()(FileMetaData* f1, FileMetaData* f2) const {
|
2014-03-26 20:30:14 +00:00
|
|
|
switch (sort_method) {
|
2014-06-20 07:12:14 +00:00
|
|
|
case kLevel0:
|
2014-03-26 20:30:14 +00:00
|
|
|
return NewestFirstBySeqNo(f1, f2);
|
|
|
|
case kLevelNon0:
|
|
|
|
return BySmallestKey(f1, f2, internal_comparator);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2014-03-26 20:30:14 +00:00
|
|
|
assert(false);
|
2014-03-26 21:46:07 +00:00
|
|
|
return false;
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-03-26 20:30:14 +00:00
|
|
|
typedef std::set<FileMetaData*, FileComparator> FileSet;
|
2011-05-21 02:17:43 +00:00
|
|
|
struct LevelState {
|
|
|
|
std::set<uint64_t> deleted_files;
|
|
|
|
FileSet* added_files;
|
|
|
|
};
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
ColumnFamilyData* cfd_;
|
2011-05-21 02:17:43 +00:00
|
|
|
Version* base_;
|
2012-06-23 02:30:03 +00:00
|
|
|
LevelState* levels_;
|
2014-03-26 20:30:14 +00:00
|
|
|
FileComparator level_zero_cmp_;
|
|
|
|
FileComparator level_nonzero_cmp_;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
public:
|
2014-02-28 19:25:38 +00:00
|
|
|
Builder(ColumnFamilyData* cfd) : cfd_(cfd), base_(cfd->current()) {
|
2011-05-21 02:17:43 +00:00
|
|
|
base_->Ref();
|
2014-02-28 19:25:38 +00:00
|
|
|
levels_ = new LevelState[base_->NumberLevels()];
|
2014-06-20 07:12:14 +00:00
|
|
|
level_zero_cmp_.sort_method = FileComparator::kLevel0;
|
2014-03-26 20:30:14 +00:00
|
|
|
level_nonzero_cmp_.sort_method = FileComparator::kLevelNon0;
|
2014-03-31 19:44:54 +00:00
|
|
|
level_nonzero_cmp_.internal_comparator = &cfd->internal_comparator();
|
2014-03-26 20:30:14 +00:00
|
|
|
|
|
|
|
levels_[0].added_files = new FileSet(level_zero_cmp_);
|
2014-03-31 19:44:54 +00:00
|
|
|
for (int level = 1; level < base_->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
levels_[level].added_files = new FileSet(level_nonzero_cmp_);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
~Builder() {
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < base_->NumberLevels(); level++) {
|
2011-07-19 23:36:47 +00:00
|
|
|
const FileSet* added = levels_[level].added_files;
|
|
|
|
std::vector<FileMetaData*> to_unref;
|
|
|
|
to_unref.reserve(added->size());
|
|
|
|
for (FileSet::const_iterator it = added->begin();
|
|
|
|
it != added->end(); ++it) {
|
|
|
|
to_unref.push_back(*it);
|
|
|
|
}
|
|
|
|
delete added;
|
2011-08-06 00:19:37 +00:00
|
|
|
for (uint32_t i = 0; i < to_unref.size(); i++) {
|
2011-05-21 02:17:43 +00:00
|
|
|
FileMetaData* f = to_unref[i];
|
2011-03-18 22:37:00 +00:00
|
|
|
f->refs--;
|
|
|
|
if (f->refs <= 0) {
|
2014-01-07 04:29:17 +00:00
|
|
|
if (f->table_reader_handle) {
|
2014-02-06 23:42:16 +00:00
|
|
|
cfd_->table_cache()->ReleaseHandle(f->table_reader_handle);
|
2014-01-07 04:29:17 +00:00
|
|
|
f->table_reader_handle = nullptr;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
delete f;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-12-31 02:33:57 +00:00
|
|
|
|
2012-06-23 02:30:03 +00:00
|
|
|
delete[] levels_;
|
2011-05-21 02:17:43 +00:00
|
|
|
base_->Unref();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
void CheckConsistency(Version* v) {
|
|
|
|
#ifndef NDEBUG
|
2014-03-26 20:30:14 +00:00
|
|
|
// make sure the files are sorted correctly
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
for (size_t i = 1; i < v->files_[level].size(); i++) {
|
|
|
|
auto f1 = v->files_[level][i - 1];
|
|
|
|
auto f2 = v->files_[level][i];
|
|
|
|
if (level == 0) {
|
|
|
|
assert(level_zero_cmp_(f1, f2));
|
2014-06-20 07:12:14 +00:00
|
|
|
assert(f1->largest_seqno > f2->largest_seqno);
|
2014-03-26 20:30:14 +00:00
|
|
|
} else {
|
|
|
|
assert(level_nonzero_cmp_(f1, f2));
|
|
|
|
|
|
|
|
// Make sure there is no overlap in levels > 0
|
2014-03-31 19:44:54 +00:00
|
|
|
if (cfd_->internal_comparator().Compare(f1->largest, f2->smallest) >=
|
|
|
|
0) {
|
2012-10-19 21:00:53 +00:00
|
|
|
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
|
2014-03-26 20:30:14 +00:00
|
|
|
(f1->largest).DebugString().c_str(),
|
|
|
|
(f2->smallest).DebugString().c_str());
|
2012-10-19 21:00:53 +00:00
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2014-07-02 16:54:20 +00:00
|
|
|
void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number,
|
2014-01-14 23:27:09 +00:00
|
|
|
int level) {
|
2012-11-13 18:30:00 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// a file to be deleted must exist in the previous version
|
|
|
|
bool found = false;
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int l = 0; !found && l < base_->NumberLevels(); l++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
const std::vector<FileMetaData*>& base_files = base_->files_[l];
|
2013-03-15 01:32:01 +00:00
|
|
|
for (unsigned int i = 0; i < base_files.size(); i++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
FileMetaData* f = base_files[i];
|
2014-06-13 22:54:19 +00:00
|
|
|
if (f->fd.GetNumber() == number) {
|
2012-11-13 18:30:00 +00:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// if the file did not exist in the previous version, then it
|
|
|
|
// was possibly moved from a lower level to a higher level in
|
|
|
|
// the current version
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int l = level+1; !found && l < base_->NumberLevels(); l++) {
|
2012-11-13 18:30:00 +00:00
|
|
|
const FileSet* added = levels_[l].added_files;
|
2012-11-19 22:51:22 +00:00
|
|
|
for (FileSet::const_iterator added_iter = added->begin();
|
|
|
|
added_iter != added->end(); ++added_iter) {
|
|
|
|
FileMetaData* f = *added_iter;
|
2014-06-13 22:54:19 +00:00
|
|
|
if (f->fd.GetNumber() == number) {
|
2012-11-19 22:51:22 +00:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// maybe this file was added in a previous edit that was Applied
|
|
|
|
if (!found) {
|
|
|
|
const FileSet* added = levels_[level].added_files;
|
2012-11-13 18:30:00 +00:00
|
|
|
for (FileSet::const_iterator added_iter = added->begin();
|
|
|
|
added_iter != added->end(); ++added_iter) {
|
|
|
|
FileMetaData* f = *added_iter;
|
2014-06-13 22:54:19 +00:00
|
|
|
if (f->fd.GetNumber() == number) {
|
2012-11-13 18:30:00 +00:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-07-02 16:54:20 +00:00
|
|
|
if (!found) {
|
2014-07-03 21:03:24 +00:00
|
|
|
fprintf(stderr, "not found %" PRIu64 "\n", number);
|
2014-07-02 16:54:20 +00:00
|
|
|
}
|
2012-11-13 18:30:00 +00:00
|
|
|
assert(found);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Apply all of the edits in *edit to the current state.
|
|
|
|
void Apply(VersionEdit* edit) {
|
2012-10-19 21:00:53 +00:00
|
|
|
CheckConsistency(base_);
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Delete files
|
|
|
|
const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
|
2013-12-31 02:33:57 +00:00
|
|
|
for (const auto& del_file : del) {
|
|
|
|
const auto level = del_file.first;
|
|
|
|
const auto number = del_file.second;
|
2011-05-21 02:17:43 +00:00
|
|
|
levels_[level].deleted_files.insert(number);
|
2012-11-13 18:30:00 +00:00
|
|
|
CheckConsistencyForDeletes(edit, number, level);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add new files
|
2013-12-31 02:33:57 +00:00
|
|
|
for (const auto& new_file : edit->new_files_) {
|
|
|
|
const int level = new_file.first;
|
|
|
|
FileMetaData* f = new FileMetaData(new_file.second);
|
2011-03-18 22:37:00 +00:00
|
|
|
f->refs = 1;
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-06-13 22:54:19 +00:00
|
|
|
levels_[level].deleted_files.erase(f->fd.GetNumber());
|
2011-05-21 02:17:43 +00:00
|
|
|
levels_[level].added_files->insert(f);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save the current state in *v.
|
|
|
|
void SaveTo(Version* v) {
|
2012-10-19 21:00:53 +00:00
|
|
|
CheckConsistency(base_);
|
|
|
|
CheckConsistency(v);
|
2014-03-31 19:44:54 +00:00
|
|
|
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < base_->NumberLevels(); level++) {
|
2014-03-26 20:30:14 +00:00
|
|
|
const auto& cmp = (level == 0) ? level_zero_cmp_ : level_nonzero_cmp_;
|
2011-05-21 02:17:43 +00:00
|
|
|
// Merge the set of added files with the set of pre-existing files.
|
|
|
|
// Drop any deleted files. Store the result in *v.
|
2013-12-31 02:33:57 +00:00
|
|
|
const auto& base_files = base_->files_[level];
|
|
|
|
auto base_iter = base_files.begin();
|
|
|
|
auto base_end = base_files.end();
|
|
|
|
const auto& added_files = *levels_[level].added_files;
|
|
|
|
v->files_[level].reserve(base_files.size() + added_files.size());
|
|
|
|
|
|
|
|
for (const auto& added : added_files) {
|
2011-05-21 02:17:43 +00:00
|
|
|
// Add all smaller files listed in base_
|
2013-12-31 02:33:57 +00:00
|
|
|
for (auto bpos = std::upper_bound(base_iter, base_end, added, cmp);
|
2011-05-21 02:17:43 +00:00
|
|
|
base_iter != bpos;
|
|
|
|
++base_iter) {
|
|
|
|
MaybeAddFile(v, level, *base_iter);
|
|
|
|
}
|
|
|
|
|
2013-12-31 02:33:57 +00:00
|
|
|
MaybeAddFile(v, level, added);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add remaining base files
|
|
|
|
for (; base_iter != base_end; ++base_iter) {
|
|
|
|
MaybeAddFile(v, level, *base_iter);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2013-12-09 22:28:26 +00:00
|
|
|
|
2012-10-26 01:21:54 +00:00
|
|
|
CheckConsistency(v);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
2014-01-07 04:29:17 +00:00
|
|
|
void LoadTableHandlers() {
|
2014-02-06 23:42:16 +00:00
|
|
|
for (int level = 0; level < cfd_->NumberLevels(); level++) {
|
2014-01-07 04:29:17 +00:00
|
|
|
for (auto& file_meta : *(levels_[level].added_files)) {
|
|
|
|
assert(!file_meta->table_reader_handle);
|
2014-02-06 23:42:16 +00:00
|
|
|
cfd_->table_cache()->FindTable(
|
|
|
|
base_->vset_->storage_options_, cfd_->internal_comparator(),
|
2014-06-20 08:23:02 +00:00
|
|
|
file_meta->fd, &file_meta->table_reader_handle, false);
|
2014-04-17 21:07:05 +00:00
|
|
|
if (file_meta->table_reader_handle != nullptr) {
|
|
|
|
// Load table_reader
|
2014-06-13 22:54:19 +00:00
|
|
|
file_meta->fd.table_reader =
|
2014-04-17 21:07:05 +00:00
|
|
|
cfd_->table_cache()->GetTableReaderFromHandle(
|
|
|
|
file_meta->table_reader_handle);
|
|
|
|
}
|
2014-01-07 04:29:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
void MaybeAddFile(Version* v, int level, FileMetaData* f) {
|
2014-06-13 22:54:19 +00:00
|
|
|
if (levels_[level].deleted_files.count(f->fd.GetNumber()) > 0) {
|
2011-05-21 02:17:43 +00:00
|
|
|
// File is deleted: do nothing
|
|
|
|
} else {
|
2013-12-31 02:33:57 +00:00
|
|
|
auto* files = &v->files_[level];
|
2011-06-22 02:36:45 +00:00
|
|
|
if (level > 0 && !files->empty()) {
|
|
|
|
// Must not overlap
|
2014-01-31 23:30:27 +00:00
|
|
|
assert(cfd_->internal_comparator().Compare(
|
|
|
|
(*files)[files->size() - 1]->largest, f->smallest) < 0);
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
2011-05-21 02:17:43 +00:00
|
|
|
f->refs++;
|
2011-06-22 02:36:45 +00:00
|
|
|
files->push_back(f);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-02-05 21:12:23 +00:00
|
|
|
VersionSet::VersionSet(const std::string& dbname, const DBOptions* options,
|
[CF] Rethink table cache
Summary:
Adapting the table cache to column families is interesting. We want the table cache to be a global LRU, so if some column families are used less often than others, their entries get evicted from the cache. However, the current TableCache object also constructs tables on its own: if a table is not found in the cache, TableCache automatically creates a new one. We want each column family to be able to specify a different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underlying cache is shared by all TableCache objects.
This allows us to have a global LRU while still supporting different table factories for different column families. In the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
|
|
|
const EnvOptions& storage_options, Cache* table_cache)
|
|
|
|
: column_family_set_(new ColumnFamilySet(dbname, options, storage_options,
|
|
|
|
table_cache)),
|
2014-01-22 19:44:53 +00:00
|
|
|
env_(options->env),
|
2011-03-18 22:37:00 +00:00
|
|
|
dbname_(dbname),
|
|
|
|
options_(options),
|
|
|
|
next_file_number_(2),
|
|
|
|
manifest_file_number_(0), // Filled by Recover()
|
2014-03-18 04:50:15 +00:00
|
|
|
pending_manifest_file_number_(0),
|
2011-04-12 19:38:58 +00:00
|
|
|
last_sequence_(0),
|
|
|
|
prev_log_number_(0),
|
2013-01-11 01:18:50 +00:00
|
|
|
current_version_number_(0),
|
2014-01-10 23:12:34 +00:00
|
|
|
manifest_file_size_(0),
|
2013-03-15 00:00:04 +00:00
|
|
|
storage_options_(storage_options),
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
|
|
|
storage_options_compactions_(storage_options_) {}
VersionSet::~VersionSet() {
  // we need to delete column_family_set_ because its destructor depends on
  // VersionSet
  column_family_set_.reset();
  for (auto file : obsolete_files_) {
    delete file;
  }
  obsolete_files_.clear();
}

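// Installs v as the column family's current version and links it into the
// family's circular doubly-linked list of live versions. The Ref() taken
// here is the reference for being "current"; the previous current version
// drops that same reference via Unref().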
void VersionSet::AppendVersion(ColumnFamilyData* column_family_data,
                               Version* v) {
  // Make "v" current
  assert(v->refs_ == 0);
  Version* current = column_family_data->current();
  assert(v != current);
  if (current != nullptr) {
    assert(current->refs_ > 0);
    current->Unref();
  }
  column_family_data->SetCurrent(v);
  v->Ref();

  // Append to linked list
  v->prev_ = column_family_data->dummy_versions()->prev_;
  v->next_ = column_family_data->dummy_versions();
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

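// Writes the accumulated VersionEdits to the MANIFEST and installs the
// resulting Version. Concurrent callers queue up in manifest_writers_; the
// writer at the head of the queue batches all compatible edits (same column
// family, no add/drop) into one MANIFEST write, so a single sync can commit
// several writers at once.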
Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
                               VersionEdit* edit, port::Mutex* mu,
                               Directory* db_directory, bool new_descriptor_log,
                               const ColumnFamilyOptions* options) {
  mu->AssertHeld();

  // column_family_data can be nullptr only if this is column_family_add.
  // in that case, we also need to specify ColumnFamilyOptions
  if (column_family_data == nullptr) {
    assert(edit->is_column_family_add_);
    assert(options != nullptr);
  }

  // queue our request
  ManifestWriter w(mu, column_family_data, edit);
  manifest_writers_.push_back(&w);
  while (!w.done && &w != manifest_writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
  }
  if (column_family_data != nullptr && column_family_data->IsDropped()) {
    // if column family is dropped by the time we get here, no need to write
    // anything to the manifest
    manifest_writers_.pop_front();
    // Notify new head of write queue
    if (!manifest_writers_.empty()) {
      manifest_writers_.front()->cv.Signal();
    }
    return Status::OK();
  }

  std::vector<VersionEdit*> batch_edits;
  Version* v = nullptr;
  std::unique_ptr<Builder> builder(nullptr);

  // process all requests in the queue
  ManifestWriter* last_writer = &w;
  assert(!manifest_writers_.empty());
  assert(manifest_writers_.front() == &w);
  if (edit->IsColumnFamilyManipulation()) {
    // no group commits for column family add or drop
    LogAndApplyCFHelper(edit);
    batch_edits.push_back(edit);
  } else {
    v = new Version(column_family_data, this, current_version_number_++);
    builder.reset(new Builder(column_family_data));
    for (const auto& writer : manifest_writers_) {
      if (writer->edit->IsColumnFamilyManipulation() ||
          writer->cfd->GetID() != column_family_data->GetID()) {
        // no group commits for column family add or drop
        // also, group commits across column families are not supported
        break;
      }
      last_writer = writer;
      LogAndApplyHelper(column_family_data, builder.get(), v, last_writer->edit,
                        mu);
      batch_edits.push_back(last_writer->edit);
    }
    builder->SaveTo(v);
  }

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  uint64_t new_manifest_file_size = 0;
  Status s;

  assert(pending_manifest_file_number_ == 0);
  if (!descriptor_log_ ||
      manifest_file_size_ > options_->max_manifest_file_size) {
    pending_manifest_file_number_ = NewFileNumber();
    batch_edits.back()->SetNextFile(next_file_number_);
    new_descriptor_log = true;
  } else {
    pending_manifest_file_number_ = manifest_file_number_;
  }

  if (new_descriptor_log) {
    // if we're writing out new snapshot make sure to persist max column family
    if (column_family_set_->GetMaxColumnFamily() > 0) {
      edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
    }
  }
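
  // Note: SizeBeingCompacted() must be called while still holding the mutex
  // (running it unlocked has caused segfaults in the past); the result is
  // only consumed later by PrepareApply() once the mutex has been released.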
  // Unlock during expensive operations. New writes cannot get here
  // because &w is ensuring that all new writes get queued.
  {
    std::vector<uint64_t> size_being_compacted;
    if (!edit->IsColumnFamilyManipulation()) {
      size_being_compacted.resize(v->NumberLevels() - 1);
      // calculate the amount of data being compacted at every level
      column_family_data->compaction_picker()->SizeBeingCompacted(
          size_being_compacted);
    }

    mu->Unlock();

    if (!edit->IsColumnFamilyManipulation() && options_->max_open_files == -1) {
      // unlimited table cache. Pre-load table handle now.
      // Need to do it out of the mutex.
      builder->LoadTableHandlers();
    }

    // This is fine because everything inside of this block is serialized --
    // only one thread can be here at the same time
    if (new_descriptor_log) {
      // create manifest file
      Log(options_->info_log,
          "Creating manifest %" PRIu64 "\n", pending_manifest_file_number_);
      unique_ptr<WritableFile> descriptor_file;
      s = env_->NewWritableFile(
          DescriptorFileName(dbname_, pending_manifest_file_number_),
          &descriptor_file, env_->OptimizeForManifestWrite(storage_options_));
      if (s.ok()) {
        descriptor_file->SetPreallocationBlockSize(
            options_->manifest_preallocation_size);
        descriptor_log_.reset(new log::Writer(std::move(descriptor_file)));
        s = WriteSnapshot(descriptor_log_.get());
      }
    }

    if (!edit->IsColumnFamilyManipulation()) {
      // These are CPU-heavy operations, which should be called outside the
      // mutex.
      v->PrepareApply(size_being_compacted);
    }

    // Write new record to MANIFEST log
    if (s.ok()) {
      for (auto& e : batch_edits) {
        std::string record;
        e->EncodeTo(&record);
        s = descriptor_log_->AddRecord(record);
        if (!s.ok()) {
          break;
        }
      }
      if (s.ok()) {
        if (options_->use_fsync) {
          StopWatch sw(env_, options_->statistics.get(),
                       MANIFEST_FILE_SYNC_MICROS);
          s = descriptor_log_->file()->Fsync();
        } else {
          StopWatch sw(env_, options_->statistics.get(),
                       MANIFEST_FILE_SYNC_MICROS);
          s = descriptor_log_->file()->Sync();
        }
      }
      if (!s.ok()) {
        Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str());
        bool all_records_in = true;
        for (auto& e : batch_edits) {
          std::string record;
          e->EncodeTo(&record);
          if (!ManifestContains(pending_manifest_file_number_, record)) {
            all_records_in = false;
            break;
          }
        }
        if (all_records_in) {
          Log(options_->info_log,
              "MANIFEST contains log record despite error; advancing to new "
              "version to prevent mismatch between in-memory and logged state."
              " If paranoid is set, then the db is now in readonly mode.");
          s = Status::OK();
        }
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && new_descriptor_log) {
      s = SetCurrentFile(env_, dbname_, pending_manifest_file_number_,
                         db_directory);
      if (s.ok() && pending_manifest_file_number_ > manifest_file_number_) {
        // delete old manifest file
        Log(options_->info_log,
            "Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
            manifest_file_number_, pending_manifest_file_number_);
        // we don't care about an error here, PurgeObsoleteFiles will take care
        // of it later
        env_->DeleteFile(DescriptorFileName(dbname_, manifest_file_number_));
      }
    }

    if (s.ok()) {
      // find offset in manifest file where this version is stored.
      new_manifest_file_size = descriptor_log_->file()->GetFileSize();
    }

    LogFlush(options_->info_log);
    mu->Lock();
  }

  // Install the new version
  if (s.ok()) {
    if (edit->is_column_family_add_) {
      // no group commit on column family add
      assert(batch_edits.size() == 1);
      assert(options != nullptr);
      CreateColumnFamily(*options, edit);
    } else if (edit->is_column_family_drop_) {
      assert(batch_edits.size() == 1);
      column_family_data->SetDropped();
      if (column_family_data->Unref()) {
        delete column_family_data;
      }
    } else {
      uint64_t max_log_number_in_batch = 0;
      for (auto& e : batch_edits) {
        if (e->has_log_number_) {
          max_log_number_in_batch =
              std::max(max_log_number_in_batch, e->log_number_);
        }
      }
      if (max_log_number_in_batch != 0) {
        assert(column_family_data->GetLogNumber() <= max_log_number_in_batch);
        column_family_data->SetLogNumber(max_log_number_in_batch);
      }
      AppendVersion(column_family_data, v);
    }

    manifest_file_number_ = pending_manifest_file_number_;
    manifest_file_size_ = new_manifest_file_size;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    Log(options_->info_log, "Error in committing version %lu to [%s]",
        (unsigned long)v->GetVersionNumber(),
        column_family_data->GetName().c_str());
    delete v;
    if (new_descriptor_log) {
      Log(options_->info_log,
          "Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
          manifest_file_number_, pending_manifest_file_number_);
      descriptor_log_.reset();
      env_->DeleteFile(
          DescriptorFileName(dbname_, pending_manifest_file_number_));
    }
  }
  pending_manifest_file_number_ = 0;

  // wake up all the waiting writers
  while (true) {
    ManifestWriter* ready = manifest_writers_.front();
    manifest_writers_.pop_front();
    if (ready != &w) {
      ready->status = s;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }
  // Notify new head of write queue
  if (!manifest_writers_.empty()) {
    manifest_writers_.front()->cv.Signal();
  }
  return s;
}

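// Fills in the bookkeeping fields (next file number, last sequence, and, for
// drops, the max column family ID) of a column-family add/drop edit before
// it is written to the MANIFEST.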
void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
  assert(edit->IsColumnFamilyManipulation());
  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);
  if (edit->is_column_family_drop_) {
    // if we drop a column family, we have to make sure to save the max
    // column family ID, so that we don't reuse an existing ID
    edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
  }
}

void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd, Builder* builder,
                                   Version* v, VersionEdit* edit,
                                   port::Mutex* mu) {
  mu->AssertHeld();
  assert(!edit->IsColumnFamilyManipulation());

  if (edit->has_log_number_) {
    assert(edit->log_number_ >= cfd->GetLogNumber());
    assert(edit->log_number_ < next_file_number_);
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }
  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);

  builder->Apply(edit);
}

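// Recovers the VersionSet state from the MANIFEST named by CURRENT. Every
// edit record is replayed into a per-column-family Builder; at the end each
// Builder is saved into a fresh Version that becomes that family's current
// version. All column families present in the MANIFEST must be passed in
// (except in read_only mode).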
Status VersionSet::Recover(
    const std::vector<ColumnFamilyDescriptor>& column_families,
    bool read_only) {
  std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_options;
  for (auto cf : column_families) {
    cf_name_to_options.insert({cf.name, cf.options});
  }
  // Keeps track of column families in the manifest that were not found in
  // the column_families parameter. If those column families are not dropped
  // by subsequent manifest records, Recover() will return a failure status.
  std::unordered_map<int, std::string> column_families_not_found;

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string manifest_filename;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_),
                              &manifest_filename);
  if (!s.ok()) {
    return s;
  }
  if (manifest_filename.empty() || manifest_filename.back() != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  // remove the trailing '\n'
  manifest_filename.resize(manifest_filename.size() - 1);
  FileType type;
  bool parse_ok =
      ParseFileName(manifest_filename, &manifest_file_number_, &type);
  if (!parse_ok || type != kDescriptorFile) {
    return Status::Corruption("CURRENT file corrupted");
  }

  Log(options_->info_log, "Recovering from manifest file: %s\n",
      manifest_filename.c_str());

  manifest_filename = dbname_ + "/" + manifest_filename;
  unique_ptr<SequentialFile> manifest_file;
  s = env_->NewSequentialFile(manifest_filename, &manifest_file,
                              storage_options_);
  if (!s.ok()) {
    return s;
  }
  uint64_t manifest_file_size;
  s = env_->GetFileSize(manifest_filename, &manifest_file_size);
  if (!s.ok()) {
    return s;
  }

  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  uint32_t max_column_family = 0;
  std::unordered_map<uint32_t, Builder*> builders;

  // add default column family
  auto default_cf_iter = cf_name_to_options.find(kDefaultColumnFamilyName);
  if (default_cf_iter == cf_name_to_options.end()) {
    return Status::InvalidArgument("Default column family not specified");
  }
  VersionEdit default_cf_edit;
  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
  default_cf_edit.SetColumnFamily(0);
  ColumnFamilyData* default_cfd =
      CreateColumnFamily(default_cf_iter->second, &default_cf_edit);
  builders.insert({0, new Builder(default_cfd)});

  {
    VersionSet::LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(std::move(manifest_file), &reporter, true /*checksum*/,
                       0 /*initial_offset*/);
    Slice record;
    std::string scratch;
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (!s.ok()) {
        break;
      }

      // Not found means that user didn't supply that column
      // family option AND we encountered column family add
      // record. Once we encounter column family drop record,
      // we will delete the column family from
      // column_families_not_found.
      bool cf_in_not_found =
          column_families_not_found.find(edit.column_family_) !=
          column_families_not_found.end();
      // in builders means that user supplied that column family
      // option AND that we encountered column family add record
      bool cf_in_builders =
          builders.find(edit.column_family_) != builders.end();

      // they can't both be true
      assert(!(cf_in_not_found && cf_in_builders));

      ColumnFamilyData* cfd = nullptr;

      if (edit.is_column_family_add_) {
        if (cf_in_builders || cf_in_not_found) {
          s = Status::Corruption(
              "Manifest adding the same column family twice");
          break;
        }
        auto cf_options = cf_name_to_options.find(edit.column_family_name_);
        if (cf_options == cf_name_to_options.end()) {
          column_families_not_found.insert(
              {edit.column_family_, edit.column_family_name_});
        } else {
          cfd = CreateColumnFamily(cf_options->second, &edit);
          builders.insert({edit.column_family_, new Builder(cfd)});
        }
      } else if (edit.is_column_family_drop_) {
        if (cf_in_builders) {
          auto builder = builders.find(edit.column_family_);
          assert(builder != builders.end());
          delete builder->second;
          builders.erase(builder);
          cfd = column_family_set_->GetColumnFamily(edit.column_family_);
          if (cfd->Unref()) {
            delete cfd;
            cfd = nullptr;
          } else {
            // who else can have reference to cfd!?
            assert(false);
          }
        } else if (cf_in_not_found) {
          column_families_not_found.erase(edit.column_family_);
        } else {
          s = Status::Corruption(
              "Manifest - dropping non-existing column family");
          break;
        }
      } else if (!cf_in_not_found) {
        if (!cf_in_builders) {
          s = Status::Corruption(
              "Manifest record referencing unknown column family");
          break;
        }

        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
        // this should never happen since cf_in_builders is true
        assert(cfd != nullptr);
        if (edit.max_level_ >= cfd->current()->NumberLevels()) {
          s = Status::InvalidArgument(
              "db has more levels than options.num_levels");
          break;
        }

        // if it is not column family add or column family drop,
        // then it's a file add/delete, which should be forwarded
        // to builder
        auto builder = builders.find(edit.column_family_);
        assert(builder != builders.end());
        builder->second->Apply(&edit);
      }

      if (cfd != nullptr) {
        if (edit.has_log_number_) {
          if (cfd->GetLogNumber() > edit.log_number_) {
            Log(options_->info_log,
                "MANIFEST corruption detected, but ignored - Log numbers in "
                "records NOT monotonically increasing");
          } else {
            cfd->SetLogNumber(edit.log_number_);
            have_log_number = true;
          }
        }
        if (edit.has_comparator_ &&
            edit.comparator_ != cfd->user_comparator()->Name()) {
          s = Status::InvalidArgument(
              cfd->user_comparator()->Name(),
              "does not match existing comparator " + edit.comparator_);
          break;
        }
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_max_column_family_) {
        max_column_family = edit.max_column_family_;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }

    if (!have_prev_log_number) {
      prev_log_number = 0;
    }

    column_family_set_->UpdateMaxColumnFamily(max_column_family);

    MarkFileNumberUsed(prev_log_number);
    MarkFileNumberUsed(log_number);
  }

  // there were some column families in the MANIFEST that weren't specified
  // in the argument. This is OK in read_only mode
  if (read_only == false && column_families_not_found.size() > 0) {
    std::string list_of_not_found;
    for (const auto& cf : column_families_not_found) {
      list_of_not_found += ", " + cf.second;
    }
    list_of_not_found = list_of_not_found.substr(2);
    s = Status::InvalidArgument(
        "You have to open all column families. Column families not opened: " +
        list_of_not_found);
  }

  if (s.ok()) {
    for (auto cfd : *column_family_set_) {
      auto builders_iter = builders.find(cfd->GetID());
      assert(builders_iter != builders.end());
      auto builder = builders_iter->second;

      if (options_->max_open_files == -1) {
        // unlimited table cache. Pre-load table handle now.
        // Need to do it out of the mutex.
        builder->LoadTableHandlers();
      }

      Version* v = new Version(cfd, this, current_version_number_++);
      builder->SaveTo(v);

      // Install recovered version
      std::vector<uint64_t> size_being_compacted(v->NumberLevels() - 1);
      cfd->compaction_picker()->SizeBeingCompacted(size_being_compacted);
      v->PrepareApply(size_being_compacted);
      AppendVersion(cfd, v);
    }

    manifest_file_size_ = manifest_file_size;
    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    prev_log_number_ = prev_log_number;

    Log(options_->info_log,
        "Recovered from manifest file:%s succeeded,"
        "manifest_file_number is %lu, next_file_number is %lu, "
        "last_sequence is %lu, log_number is %lu,"
        "prev_log_number is %lu,"
        "max_column_family is %u\n",
        manifest_filename.c_str(), (unsigned long)manifest_file_number_,
        (unsigned long)next_file_number_, (unsigned long)last_sequence_,
        (unsigned long)log_number, (unsigned long)prev_log_number_,
        column_family_set_->GetMaxColumnFamily());

    for (auto cfd : *column_family_set_) {
      Log(options_->info_log,
          "Column family [%s] (ID %u), log number is %" PRIu64 "\n",
          cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
    }
  }

  for (auto builder : builders) {
    delete builder.second;
  }

  return s;
}

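// Reads the MANIFEST pointed to by CURRENT and replays just the column
// family add/drop records, producing the names of all live column families.
// No Builder state is needed, so this is much cheaper than a full Recover().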
Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
                                      const std::string& dbname, Env* env) {
  // these are just for performance reasons, not correctness,
  // so we're fine using the defaults
  EnvOptions soptions;
  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  Status s = ReadFileToString(env, CurrentFileName(dbname), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size() - 1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);

  std::string dscname = dbname + "/" + current;
  unique_ptr<SequentialFile> file;
  s = env->NewSequentialFile(dscname, &file, soptions);
  if (!s.ok()) {
    return s;
  }

  std::map<uint32_t, std::string> column_family_names;
  // default column family is always implicitly there
  column_family_names.insert({0, kDefaultColumnFamilyName});
  VersionSet::LogReporter reporter;
  reporter.status = &s;
  log::Reader reader(std::move(file), &reporter, true /*checksum*/,
                     0 /*initial_offset*/);
  Slice record;
  std::string scratch;
  while (reader.ReadRecord(&record, &scratch) && s.ok()) {
    VersionEdit edit;
    s = edit.DecodeFrom(record);
    if (!s.ok()) {
      break;
    }
    if (edit.is_column_family_add_) {
      if (column_family_names.find(edit.column_family_) !=
          column_family_names.end()) {
        s = Status::Corruption("Manifest adding the same column family twice");
        break;
      }
      column_family_names.insert(
          {edit.column_family_, edit.column_family_name_});
    } else if (edit.is_column_family_drop_) {
      if (column_family_names.find(edit.column_family_) ==
          column_family_names.end()) {
        s = Status::Corruption(
            "Manifest - dropping non-existing column family");
        break;
      }
      column_family_names.erase(edit.column_family_);
    }
  }

  column_families->clear();
  if (s.ok()) {
    for (const auto& iter : column_family_names) {
      column_families->push_back(iter.second);
    }
  }

  return s;
}

#ifndef ROCKSDB_LITE
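// Static helper used by the ldb tool to shrink options.num_levels. It must
// only be run against a DB that is not open, since the rest of the code
// assumes the number of levels is immutable while a DB instance is live.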
Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
                                        const Options* options,
                                        const EnvOptions& storage_options,
                                        int new_levels) {
  if (new_levels <= 1) {
    return Status::InvalidArgument(
        "Number of levels needs to be bigger than 1");
  }

  ColumnFamilyOptions cf_options(*options);
  std::shared_ptr<Cache> tc(NewLRUCache(
      options->max_open_files - 10, options->table_cache_numshardbits,
      options->table_cache_remove_scan_count_limit));
  VersionSet versions(dbname, options, storage_options, tc.get());
  Status status;

  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(*options));
  dummy.push_back(dummy_descriptor);
  status = versions.Recover(dummy);
  if (!status.ok()) {
    return status;
  }

  Version* current_version =
      versions.GetColumnFamilySet()->GetDefault()->current();
  int current_levels = current_version->NumberLevels();

  if (current_levels <= new_levels) {
    return Status::OK();
  }

  // Make sure there are files only on one level from
  // (new_levels-1) to (current_levels-1)
  int first_nonempty_level = -1;
  int first_nonempty_level_filenum = 0;
  for (int i = new_levels - 1; i < current_levels; i++) {
    int file_num = current_version->NumLevelFiles(i);
    if (file_num != 0) {
      if (first_nonempty_level < 0) {
        first_nonempty_level = i;
        first_nonempty_level_filenum = file_num;
      } else {
        char msg[255];
        snprintf(msg, sizeof(msg),
                 "Found at least two levels containing files: "
                 "[%d:%d],[%d:%d].\n",
                 first_nonempty_level, first_nonempty_level_filenum, i,
                 file_num);
        return Status::InvalidArgument(msg);
      }
    }
  }

  std::vector<FileMetaData*>* old_files_list = current_version->files_;
  // we need to allocate an array with the old number of levels size to
  // avoid SIGSEGV in WriteSnapshot()
  // however, all levels bigger or equal to new_levels will be empty
  std::vector<FileMetaData*>* new_files_list =
      new std::vector<FileMetaData*>[current_levels];
  for (int i = 0; i < new_levels - 1; i++) {
    new_files_list[i] = old_files_list[i];
  }

  if (first_nonempty_level > 0) {
    new_files_list[new_levels - 1] = old_files_list[first_nonempty_level];
  }

  delete[] current_version->files_;
  current_version->files_ = new_files_list;
  current_version->num_levels_ = new_levels;

  VersionEdit ve;
  port::Mutex dummy_mutex;
  MutexLock l(&dummy_mutex);
  return versions.LogAndApply(versions.GetColumnFamilySet()->GetDefault(), &ve,
                              &dummy_mutex, nullptr, true);
}

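// Replays the given MANIFEST file and prints its contents: each individual
// edit when verbose is set, followed by the reconstructed per-column-family
// state (log number, comparator, and the final Version's file layout).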
Status VersionSet::DumpManifest(Options& options, std::string& dscname,
|
|
|
|
bool verbose, bool hex) {
|
2012-08-17 17:48:40 +00:00
|
|
|
// Open the specified manifest file.
|
2013-01-20 10:07:13 +00:00
|
|
|
unique_ptr<SequentialFile> file;
|
2013-03-15 00:00:04 +00:00
|
|
|
Status s = options.env->NewSequentialFile(dscname, &file, storage_options_);
|
2012-08-17 17:48:40 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool have_prev_log_number = false;
|
|
|
|
bool have_next_file = false;
|
|
|
|
bool have_last_sequence = false;
|
|
|
|
uint64_t next_file = 0;
|
|
|
|
uint64_t last_sequence = 0;
|
|
|
|
uint64_t prev_log_number = 0;
|
2012-11-19 19:54:13 +00:00
|
|
|
int count = 0;
|
2014-02-28 00:18:23 +00:00
|
|
|
std::unordered_map<uint32_t, std::string> comparators;
|
|
|
|
std::unordered_map<uint32_t, Builder*> builders;
|
|
|
|
|
|
|
|
// add default column family
|
|
|
|
VersionEdit default_cf_edit;
|
2014-04-09 16:56:17 +00:00
|
|
|
default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
|
2014-02-28 00:18:23 +00:00
|
|
|
default_cf_edit.SetColumnFamily(0);
|
|
|
|
ColumnFamilyData* default_cfd =
|
|
|
|
CreateColumnFamily(ColumnFamilyOptions(options), &default_cf_edit);
|
2014-02-28 19:25:38 +00:00
|
|
|
builders.insert({0, new Builder(default_cfd)});
|
2012-08-17 17:48:40 +00:00
|
|
|
|
|
|
|
{
|
2014-01-22 19:44:53 +00:00
|
|
|
VersionSet::LogReporter reporter;
|
2012-08-17 17:48:40 +00:00
|
|
|
reporter.status = &s;
|
2013-01-20 10:07:13 +00:00
|
|
|
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
|
|
|
|
0/*initial_offset*/);
|
2012-08-17 17:48:40 +00:00
|
|
|
Slice record;
|
|
|
|
std::string scratch;
|
|
|
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
2014-01-14 23:27:09 +00:00
|
|
|
VersionEdit edit;
|
2012-08-17 17:48:40 +00:00
|
|
|
s = edit.DecodeFrom(record);
|
2014-02-28 00:18:23 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
2012-08-17 17:48:40 +00:00
|
|
|
}
|
|
|
|
|
2012-11-19 20:16:45 +00:00
|
|
|
// Write out each individual edit
|
|
|
|
if (verbose) {
|
2012-11-29 00:42:36 +00:00
|
|
|
printf("*************************Edit[%d] = %s\n",
|
2013-08-08 22:51:16 +00:00
|
|
|
count, edit.DebugString(hex).c_str());
|
2012-11-19 20:16:45 +00:00
|
|
|
}
|
|
|
|
count++;
|
|
|
|
|
2014-02-28 00:18:23 +00:00
|
|
|
bool cf_in_builders =
|
|
|
|
builders.find(edit.column_family_) != builders.end();
|
|
|
|
|
|
|
|
if (edit.has_comparator_) {
|
|
|
|
comparators.insert({edit.column_family_, edit.comparator_});
|
2012-08-17 17:48:40 +00:00
|
|
|
}
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
|
2014-02-28 00:18:23 +00:00
|
|
|
if (edit.is_column_family_add_) {
|
|
|
|
if (cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest adding the same column family twice");
|
|
|
|
break;
|
|
|
|
}
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = CreateColumnFamily(ColumnFamilyOptions(options), &edit);
|
|
|
|
builders.insert({edit.column_family_, new Builder(cfd)});
|
2014-02-28 00:18:23 +00:00
|
|
|
} else if (edit.is_column_family_drop_) {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest - dropping non-existing column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
auto builder_iter = builders.find(edit.column_family_);
|
|
|
|
delete builder_iter->second;
|
|
|
|
builders.erase(builder_iter);
|
|
|
|
comparators.erase(edit.column_family_);
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
2014-02-28 00:18:23 +00:00
|
|
|
assert(cfd != nullptr);
|
|
|
|
cfd->Unref();
|
|
|
|
delete cfd;
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = nullptr;
|
2014-02-28 00:18:23 +00:00
|
|
|
} else {
|
|
|
|
if (!cf_in_builders) {
|
|
|
|
s = Status::Corruption(
|
|
|
|
"Manifest record referencing unknown column family");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
cfd = column_family_set_->GetColumnFamily(edit.column_family_);
|
2014-02-28 00:18:23 +00:00
|
|
|
// this should never happen since cf_in_builders is true
|
|
|
|
assert(cfd != nullptr);
|
|
|
|
|
|
|
|
// if it is not a column family add or a column family drop,
|
|
|
|
// then it's a file add/delete, which should be forwarded
|
|
|
|
// to the builder
|
|
|
|
auto builder = builders.find(edit.column_family_);
|
|
|
|
assert(builder != builders.end());
|
|
|
|
builder->second->Apply(&edit);
|
2012-08-17 17:48:40 +00:00
|
|
|
}
|
|
|
|
|
2014-02-28 19:25:38 +00:00
|
|
|
if (cfd != nullptr && edit.has_log_number_) {
|
|
|
|
cfd->SetLogNumber(edit.log_number_);
|
|
|
|
}
|
|
|
|
|
2012-08-17 17:48:40 +00:00
|
|
|
if (edit.has_prev_log_number_) {
|
|
|
|
prev_log_number = edit.prev_log_number_;
|
|
|
|
have_prev_log_number = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_next_file_number_) {
|
|
|
|
next_file = edit.next_file_number_;
|
|
|
|
have_next_file = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (edit.has_last_sequence_) {
|
|
|
|
last_sequence = edit.last_sequence_;
|
|
|
|
have_last_sequence = true;
|
|
|
|
}
|
2014-03-05 20:13:44 +00:00
|
|
|
|
|
|
|
if (edit.has_max_column_family_) {
|
|
|
|
column_family_set_->UpdateMaxColumnFamily(edit.max_column_family_);
|
|
|
|
}
|
2012-08-17 17:48:40 +00:00
|
|
|
}
|
|
|
|
}
|
2013-01-20 10:07:13 +00:00
|
|
|
file.reset();
|
2012-08-17 17:48:40 +00:00
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
if (!have_next_file) {
|
|
|
|
s = Status::Corruption("no meta-nextfile entry in descriptor");
|
|
|
|
printf("no meta-nextfile entry in descriptor");
|
|
|
|
} else if (!have_last_sequence) {
|
|
|
|
printf("no last-sequence-number entry in descriptor");
|
|
|
|
s = Status::Corruption("no last-sequence-number entry in descriptor");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!have_prev_log_number) {
|
|
|
|
prev_log_number = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
2014-02-28 00:18:23 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
auto builders_iter = builders.find(cfd->GetID());
|
|
|
|
assert(builders_iter != builders.end());
|
|
|
|
auto builder = builders_iter->second;
|
|
|
|
|
|
|
|
Version* v = new Version(cfd, this, current_version_number_++);
|
|
|
|
builder->SaveTo(v);
|
2014-03-18 16:45:52 +00:00
|
|
|
std::vector<uint64_t> size_being_compacted(v->NumberLevels() - 1);
|
|
|
|
cfd->compaction_picker()->SizeBeingCompacted(size_being_compacted);
|
2014-06-13 22:06:10 +00:00
|
|
|
v->PrepareApply(size_being_compacted);
|
2014-02-28 00:18:23 +00:00
|
|
|
delete builder;
|
|
|
|
|
|
|
|
printf("--------------- Column family \"%s\" (ID %u) --------------\n",
|
|
|
|
cfd->GetName().c_str(), (unsigned int)cfd->GetID());
|
|
|
|
printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
|
|
|
|
auto comparator = comparators.find(cfd->GetID());
|
|
|
|
if (comparator != comparators.end()) {
|
|
|
|
printf("comparator: %s\n", comparator->second.c_str());
|
|
|
|
} else {
|
|
|
|
printf("comparator: <NO COMPARATOR>\n");
|
|
|
|
}
|
|
|
|
printf("%s \n", v->DebugString(hex).c_str());
|
|
|
|
delete v;
|
|
|
|
}
|
Prevent segfault because SizeUnderCompaction was called without any locks.
Summary:
SizeBeingCompacted was called without any lock protection. This causes
crashes, especially when running db_bench with value_size=128K.
The fix is to compute SizeUnderCompaction while holding the mutex and
to pass these values into the call to Finalize.
(gdb) where
#4 leveldb::VersionSet::SizeBeingCompacted (this=this@entry=0x7f0b490931c0, level=level@entry=4) at db/version_set.cc:1827
#5 0x000000000043a3c8 in leveldb::VersionSet::Finalize (this=this@entry=0x7f0b490931c0, v=v@entry=0x7f0b3b86b480) at db/version_set.cc:1420
#6 0x00000000004418d1 in leveldb::VersionSet::LogAndApply (this=0x7f0b490931c0, edit=0x7f0b3dc8c200, mu=0x7f0b490835b0, new_descriptor_log=<optimized out>) at db/version_set.cc:1016
#7 0x00000000004222b2 in leveldb::DBImpl::InstallCompactionResults (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1473
#8 0x0000000000426027 in leveldb::DBImpl::DoCompactionWork (this=this@entry=0x7f0b49083400, compact=compact@entry=0x7f0b2b8330f0) at db/db_impl.cc:1757
#9 0x0000000000426690 in leveldb::DBImpl::BackgroundCompaction (this=this@entry=0x7f0b49083400, madeProgress=madeProgress@entry=0x7f0b41bf2d1e, deletion_state=...) at db/db_impl.cc:1268
#10 0x0000000000428f42 in leveldb::DBImpl::BackgroundCall (this=0x7f0b49083400) at db/db_impl.cc:1170
#11 0x000000000045348e in BGThread (this=0x7f0b49023100) at util/env_posix.cc:941
#12 leveldb::(anonymous namespace)::PosixEnv::BGThreadWrapper (arg=0x7f0b49023100) at util/env_posix.cc:874
#13 0x00007f0b4a7cf10d in start_thread (arg=0x7f0b41bf3700) at pthread_create.c:301
#14 0x00007f0b49b4b11d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
Test Plan:
make check
I am running db_bench with a value size of 128K to see if the segfault is fixed.
Reviewers: MarkCallaghan, sheki, emayanke
Reviewed By: sheki
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9279
2013-03-11 16:47:48 +00:00
|
|
|
|
2012-08-17 17:48:40 +00:00
|
|
|
next_file_number_ = next_file + 1;
|
|
|
|
last_sequence_ = last_sequence;
|
|
|
|
prev_log_number_ = prev_log_number;
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2014-02-28 00:18:23 +00:00
|
|
|
printf(
|
2014-03-12 17:52:32 +00:00
|
|
|
"next_file_number %lu last_sequence "
|
2014-03-05 20:13:44 +00:00
|
|
|
"%lu prev_log_number %lu max_column_family %u\n",
|
2014-03-12 17:52:32 +00:00
|
|
|
(unsigned long)next_file_number_, (unsigned long)last_sequence,
|
|
|
|
(unsigned long)prev_log_number,
|
2014-03-05 20:13:44 +00:00
|
|
|
column_family_set_->GetMaxColumnFamily());
|
2012-08-17 17:48:40 +00:00
|
|
|
}
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2012-08-17 17:48:40 +00:00
|
|
|
return s;
|
|
|
|
}
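// Replay sketch (illustrative only; types elided): every manifest record
// above is one VersionEdit, dispatched to a per-column-family Builder keyed
// by column family id:
//
//   std::unordered_map<uint32_t, Builder*> builders;  // one per live CF
//   while (reader.ReadRecord(&record, &scratch) && s.ok()) {
//     VersionEdit edit;
//     s = edit.DecodeFrom(record);
//     if (edit.is_column_family_add_) {
//       // create the column family and its Builder
//     } else if (edit.is_column_family_drop_) {
//       // delete the Builder and unref the column family
//     } else {
//       builders[edit.column_family_]->Apply(&edit);  // file add/delete
//     }
//   }
//   // finally, each surviving Builder is saved into a fresh Version.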
|
2014-04-15 20:39:26 +00:00
|
|
|
#endif // ROCKSDB_LITE
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
void VersionSet::MarkFileNumberUsed(uint64_t number) {
|
|
|
|
if (next_file_number_ <= number) {
|
|
|
|
next_file_number_ = number + 1;
|
|
|
|
}
|
|
|
|
}
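// Usage sketch (the caller shown is an assumption, for illustration only):
// recovery marks every WAL number it encounters as used, so freshly
// allocated file numbers can never collide with an existing log:
//
//   for (uint64_t log_number : recovered_log_numbers) {  // hypothetical list
//     versions->MarkFileNumberUsed(log_number);
//   }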
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
Status VersionSet::WriteSnapshot(log::Writer* log) {
|
|
|
|
// TODO: Break up into multiple records to reduce memory usage on recovery?
|
2013-10-16 20:32:53 +00:00
|
|
|
|
2014-03-13 01:09:03 +00:00
|
|
|
// WARNING: This method doesn't hold a mutex!!
|
|
|
|
|
2014-03-11 03:22:31 +00:00
|
|
|
// This is done without the DB mutex held, but only within the single-threaded
|
|
|
|
// LogAndApply. Column family manipulations can only happen within LogAndApply
|
2014-03-13 01:09:03 +00:00
|
|
|
// (the same single thread), so we're safe to iterate.
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-01-22 01:01:52 +00:00
|
|
|
{
|
|
|
|
// Store column family info
|
|
|
|
VersionEdit edit;
|
2014-01-29 21:28:50 +00:00
|
|
|
if (cfd->GetID() != 0) {
|
2014-01-22 01:01:52 +00:00
|
|
|
// default column family is always there,
|
|
|
|
// no need to explicitly write it
|
2014-01-29 21:28:50 +00:00
|
|
|
edit.AddColumnFamily(cfd->GetName());
|
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
2014-02-03 20:08:33 +00:00
|
|
|
}
|
|
|
|
edit.SetComparatorName(
|
|
|
|
cfd->internal_comparator().user_comparator()->Name());
|
|
|
|
std::string record;
|
|
|
|
edit.EncodeTo(&record);
|
|
|
|
Status s = log->AddRecord(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
2012-10-19 21:00:53 +00:00
|
|
|
}
|
2014-01-22 01:01:52 +00:00
|
|
|
}
|
2012-10-19 21:00:53 +00:00
|
|
|
|
2014-01-22 01:01:52 +00:00
|
|
|
{
|
|
|
|
// Save files
|
|
|
|
VersionEdit edit;
|
2014-01-29 21:28:50 +00:00
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
2014-01-22 01:01:52 +00:00
|
|
|
|
2014-02-03 20:08:33 +00:00
|
|
|
for (int level = 0; level < cfd->NumberLevels(); level++) {
|
2014-01-29 21:28:50 +00:00
|
|
|
for (const auto& f : cfd->current()->files_[level]) {
|
2014-07-02 16:54:20 +00:00
|
|
|
edit.AddFile(level, f->fd.GetNumber(), f->fd.GetPathId(),
|
|
|
|
f->fd.GetFileSize(), f->smallest, f->largest,
|
|
|
|
f->smallest_seqno, f->largest_seqno);
|
2012-10-19 21:00:53 +00:00
|
|
|
}
|
|
|
|
}
|
2014-01-29 21:28:50 +00:00
|
|
|
edit.SetLogNumber(cfd->GetLogNumber());
|
2014-01-22 01:01:52 +00:00
|
|
|
std::string record;
|
|
|
|
edit.EncodeTo(&record);
|
|
|
|
Status s = log->AddRecord(record);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
2014-01-10 23:12:34 +00:00
|
|
|
}
|
2014-01-02 17:08:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-13 01:09:03 +00:00
|
|
|
return Status::OK();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
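// Snapshot layout, as produced by the loop above: each column family
// contributes up to two records --
//   record 1: SetComparatorName (plus AddColumnFamily/SetColumnFamily for
//             non-default families);
//   record 2: SetColumnFamily, one AddFile per live table, SetLogNumber.
// A reader can therefore rebuild the complete file layout from this one
// snapshot without consulting older manifest records.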
|
|
|
|
|
2013-01-08 20:00:13 +00:00
|
|
|
// Opens the manifest file and reads all records
|
|
|
|
// till it finds the record we are looking for.
|
2014-03-18 04:50:15 +00:00
|
|
|
bool VersionSet::ManifestContains(uint64_t manifest_file_number,
|
|
|
|
const std::string& record) const {
|
|
|
|
std::string fname =
|
|
|
|
DescriptorFileName(dbname_, manifest_file_number);
|
2013-01-08 20:00:13 +00:00
|
|
|
Log(options_->info_log, "ManifestContains: checking %s\n", fname.c_str());
|
2013-01-20 10:07:13 +00:00
|
|
|
unique_ptr<SequentialFile> file;
|
2013-03-15 00:00:04 +00:00
|
|
|
Status s = env_->NewSequentialFile(fname, &file, storage_options_);
|
2013-01-08 20:00:13 +00:00
|
|
|
if (!s.ok()) {
|
|
|
|
Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str());
|
2013-03-06 21:28:54 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"ManifestContains: is unable to reopen the manifest file %s",
|
|
|
|
fname.c_str());
|
2013-01-08 20:00:13 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-03-01 02:04:58 +00:00
|
|
|
log::Reader reader(std::move(file), nullptr, true/*checksum*/, 0);
|
2013-01-08 20:00:13 +00:00
|
|
|
Slice r;
|
|
|
|
std::string scratch;
|
|
|
|
bool result = false;
|
|
|
|
while (reader.ReadRecord(&r, &scratch)) {
|
|
|
|
if (r == Slice(record)) {
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Log(options_->info_log, "ManifestContains: result = %d\n", result ? 1 : 0);
|
|
|
|
return result;
|
|
|
|
}
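// Usage sketch (assumed caller, illustrative only): after a suspicious
// manifest write, the file can be re-read to confirm the edit was persisted:
//
//   std::string record;
//   edit.EncodeTo(&record);
//   if (versions->ManifestContains(manifest_file_number, record)) {
//     // the record reached disk despite the reported error
//   }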
|
|
|
|
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
|
|
|
|
uint64_t result = 0;
|
2014-01-16 00:15:43 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2011-03-18 22:37:00 +00:00
|
|
|
const std::vector<FileMetaData*>& files = v->files_[level];
|
2011-04-20 22:48:11 +00:00
|
|
|
for (size_t i = 0; i < files.size(); i++) {
|
2014-02-03 20:08:33 +00:00
|
|
|
if (v->cfd_->internal_comparator().Compare(files[i]->largest, ikey) <=
|
|
|
|
0) {
|
2011-03-18 22:37:00 +00:00
|
|
|
// Entire file is before "ikey", so just add the file size
|
2014-06-13 22:54:19 +00:00
|
|
|
result += files[i]->fd.GetFileSize();
|
2014-02-03 20:08:33 +00:00
|
|
|
} else if (v->cfd_->internal_comparator().Compare(files[i]->smallest,
|
|
|
|
ikey) > 0) {
|
2011-03-18 22:37:00 +00:00
|
|
|
// Entire file is after "ikey", so ignore
|
|
|
|
if (level > 0) {
|
|
|
|
// Files other than level 0 are sorted by meta->smallest, so
|
|
|
|
// no further files in this level will contain data for
|
|
|
|
// "ikey".
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// "ikey" falls in the range for this table. Add the
|
|
|
|
// approximate offset of "ikey" within the table.
|
2013-10-30 17:52:33 +00:00
|
|
|
TableReader* table_reader_ptr;
|
[CF] Rethink table cache
Summary:
Adapting the table cache to column families is interesting. We want the table cache to be a global LRU, so if some column families are used less often than others, we want them to be evicted from the cache. However, the current TableCache object also constructs tables on its own. If a table is not found in the cache, TableCache automatically creates a new table. We want each column family to be able to specify a different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underlying cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
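// Design note (summarizing the change above; the constructor shape is an
// assumption): each column family owns a TableCache, but every TableCache
// wraps the same LRU Cache object, so eviction pressure is global while
// table factories stay per-family:
//
//   std::shared_ptr<Cache> lru = NewLRUCache(capacity);  // one shared LRU
//   TableCache cf1_cache(/*...*/, lru.get());  // hypothetical construction
//   TableCache cf2_cache(/*...*/, lru.get());  // same underlying cache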
|
|
|
Iterator* iter = v->cfd_->table_cache()->NewIterator(
|
2014-02-06 23:42:16 +00:00
|
|
|
ReadOptions(), storage_options_, v->cfd_->internal_comparator(),
|
2014-06-13 22:54:19 +00:00
|
|
|
files[i]->fd, &table_reader_ptr);
|
2013-10-30 17:52:33 +00:00
|
|
|
if (table_reader_ptr != nullptr) {
|
|
|
|
result += table_reader_ptr->ApproximateOffsetOf(ikey.Encode());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
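// Worked example (hypothetical numbers): level 1 holds three 4 MB files
// covering [a..f], [g..m] and [n..z], and ikey = "h". File 1 ends before
// "h" and adds its full 4 MB; file 2 contains "h" and adds the table's own
// ApproximateOffsetOf("h"), say 1 MB; the loop then breaks before file 3
// because level-1 files are sorted, giving a result of roughly 5 MB.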
|
|
|
|
|
2014-07-02 16:54:20 +00:00
|
|
|
void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
|
[RocksDB] [Performance] Speed up FindObsoleteFiles
Summary:
FindObsoleteFiles was slow, holding the single big lock, which resulted in bad p99 behavior.
Didn't profile anything, but several things could be improved:
1. VersionSet::AddLiveFiles works with std::set, which is by itself slow (a tree).
You also don't know how many dynamic allocations occur just for building up this tree.
Switched to std::vector, and also added logic to pre-calculate the total size and do just one allocation.
2. Don't see why env_->GetChildren() needs to be mutex protected; moved it to PurgeObsoleteFiles, where
the mutex can be unlocked.
3. Switched std::set to std::unordered_set; the conversion from vector is also inside PurgeObsoleteFiles.
I have a feeling this should pretty much fix it.
Test Plan: make check; db_stress
Reviewers: dhruba, heyongqiang, MarkCallaghan
Reviewed By: dhruba
CC: leveldb, zshao
Differential Revision: https://reviews.facebook.net/D10197
2013-04-11 23:49:53 +00:00
|
|
|
// pre-calculate space requirement
|
|
|
|
int64_t total_files = 0;
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
2014-01-22 19:44:53 +00:00
|
|
|
v = v->next_) {
|
2014-01-22 01:01:52 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-01-10 23:12:34 +00:00
|
|
|
total_files += v->files_[level].size();
|
|
|
|
}
|
2013-04-11 23:49:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// just a one-time extension to the right size
|
|
|
|
live_list->reserve(live_list->size() + total_files);
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* dummy_versions = cfd->dummy_versions();
|
|
|
|
for (Version* v = dummy_versions->next_; v != dummy_versions;
|
2014-01-22 19:44:53 +00:00
|
|
|
v = v->next_) {
|
2014-01-22 01:01:52 +00:00
|
|
|
for (int level = 0; level < v->NumberLevels(); level++) {
|
2014-01-10 23:12:34 +00:00
|
|
|
for (const auto& f : v->files_[level]) {
|
2014-07-02 16:54:20 +00:00
|
|
|
live_list->push_back(f->fd);
|
2014-01-10 23:12:34 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
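// Usage sketch (assumed caller, illustrative only): obsolete-file collection
// gathers every live descriptor in one pass, then converts the vector to a
// set for O(1) membership checks outside the DB mutex:
//
//   std::vector<FileDescriptor> live;
//   versions->AddLiveFiles(&live);  // single reserve(), no repeated growth
//   std::unordered_set<uint64_t> live_numbers;
//   for (const auto& fd : live) {
//     live_numbers.insert(fd.GetNumber());
//   }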
|
|
|
|
|
|
|
|
Iterator* VersionSet::MakeInputIterator(Compaction* c) {
|
2014-04-25 19:22:23 +00:00
|
|
|
auto cfd = c->column_family_data();
|
|
|
|
ReadOptions read_options;
|
|
|
|
read_options.verify_checksums =
|
|
|
|
cfd->options()->verify_checksums_in_compaction;
|
|
|
|
read_options.fill_cache = false;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Level-0 files have to be merged together. For other levels,
|
|
|
|
// we will make a concatenating iterator per level.
|
|
|
|
// TODO(opt): use concatenating iterator for level-0 if there is no overlap
|
2014-07-25 00:06:00 +00:00
|
|
|
const int space = (c->level() == 0 ?
|
|
|
|
c->input_levels(0)->num_files + c->num_input_levels() - 1 :
|
|
|
|
c->num_input_levels());
|
2011-03-18 22:37:00 +00:00
|
|
|
Iterator** list = new Iterator*[space];
|
|
|
|
int num = 0;
|
2014-07-25 00:06:00 +00:00
|
|
|
for (int which = 0; which < c->num_input_levels(); which++) {
|
2014-07-11 19:52:41 +00:00
|
|
|
if (c->input_levels(which)->num_files != 0) {
|
2014-07-17 01:12:17 +00:00
|
|
|
if (c->level(which) == 0) {
|
2014-07-11 19:52:41 +00:00
|
|
|
const FileLevel* flevel = c->input_levels(which);
|
|
|
|
for (size_t i = 0; i < flevel->num_files; i++) {
|
2014-04-25 19:22:23 +00:00
|
|
|
list[num++] = cfd->table_cache()->NewIterator(
|
|
|
|
read_options, storage_options_compactions_,
|
2014-07-11 19:52:41 +00:00
|
|
|
cfd->internal_comparator(), flevel->files[i].fd, nullptr,
|
2014-01-24 00:32:49 +00:00
|
|
|
true /* for compaction */);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Create concatenating iterator for the files from this level
|
2014-04-25 19:22:23 +00:00
|
|
|
list[num++] = NewTwoLevelIterator(new Version::LevelFileIteratorState(
|
|
|
|
cfd->table_cache(), read_options, storage_options_,
|
|
|
|
cfd->internal_comparator(), true /* for_compaction */,
|
|
|
|
false /* prefix enabled */),
|
|
|
|
new Version::LevelFileNumIterator(cfd->internal_comparator(),
|
2014-07-11 19:52:41 +00:00
|
|
|
c->input_levels(which)));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(num <= space);
|
2014-02-01 00:45:20 +00:00
|
|
|
Iterator* result = NewMergingIterator(
|
2014-04-08 20:40:42 +00:00
|
|
|
&c->column_family_data()->internal_comparator(), list, num);
|
2011-03-18 22:37:00 +00:00
|
|
|
delete[] list;
|
|
|
|
return result;
|
|
|
|
}
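// Sizing example (hypothetical compaction): with four level-0 input files
// and two input levels in total,
//   space = num_files(L0) + num_input_levels - 1 = 4 + 2 - 1 = 5
// -- one iterator per L0 file (L0 files may overlap, so each is merged
// individually) plus one concatenating iterator for the non-L0 level.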
|
|
|
|
|
2012-11-29 00:42:36 +00:00
|
|
|
// verify that the files listed in this compaction are present
|
2012-10-19 21:00:53 +00:00
|
|
|
// in the current version
|
|
|
|
bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
|
2013-03-06 21:28:54 +00:00
|
|
|
#ifndef NDEBUG
|
2014-02-01 00:45:20 +00:00
|
|
|
Version* version = c->column_family_data()->current();
|
2014-01-22 18:59:07 +00:00
|
|
|
if (c->input_version() != version) {
|
2014-04-25 13:51:16 +00:00
|
|
|
Log(options_->info_log,
|
|
|
|
"[%s] VerifyCompactionFileConsistency version mismatch",
|
|
|
|
c->column_family_data()->GetName().c_str());
|
2012-10-19 21:00:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// verify files in level
|
|
|
|
int level = c->level();
|
|
|
|
for (int i = 0; i < c->num_input_files(0); i++) {
|
2014-06-13 22:54:19 +00:00
|
|
|
uint64_t number = c->input(0, i)->fd.GetNumber();
|
2012-10-19 21:00:53 +00:00
|
|
|
|
|
|
|
// look for this file in the current version
|
|
|
|
bool found = false;
|
2014-01-10 23:12:34 +00:00
|
|
|
for (unsigned int j = 0; j < version->files_[level].size(); j++) {
|
|
|
|
FileMetaData* f = version->files_[level][j];
|
2014-06-13 22:54:19 +00:00
|
|
|
if (f->fd.GetNumber() == number) {
|
2012-10-19 21:00:53 +00:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return false; // input files non-existent in current version
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// verify level+1 files
|
|
|
|
level++;
|
|
|
|
for (int i = 0; i < c->num_input_files(1); i++) {
|
2014-06-13 22:54:19 +00:00
|
|
|
uint64_t number = c->input(1, i)->fd.GetNumber();
|
2012-10-19 21:00:53 +00:00
|
|
|
|
|
|
|
// look for this file in the current version
|
|
|
|
bool found = false;
|
2014-01-10 23:12:34 +00:00
|
|
|
for (unsigned int j = 0; j < version->files_[level].size(); j++) {
|
|
|
|
FileMetaData* f = version->files_[level][j];
|
2014-06-13 22:54:19 +00:00
|
|
|
if (f->fd.GetNumber() == number) {
|
2012-10-19 21:00:53 +00:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return false; // input files non-existent in current version
|
|
|
|
}
|
|
|
|
}
|
2013-03-06 21:28:54 +00:00
|
|
|
#endif
|
2012-10-19 21:00:53 +00:00
|
|
|
return true; // everything good
|
|
|
|
}
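// Note: because of the #ifndef NDEBUG guard above, release builds compile
// the checks away and this function unconditionally returns true.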
|
|
|
|
|
2014-01-16 00:15:43 +00:00
|
|
|
Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
|
2014-02-06 23:42:16 +00:00
|
|
|
FileMetaData** meta,
|
2014-01-27 22:33:50 +00:00
|
|
|
ColumnFamilyData** cfd) {
|
|
|
|
for (auto cfd_iter : *column_family_set_) {
|
2014-01-29 21:28:50 +00:00
|
|
|
Version* version = cfd_iter->current();
|
2014-01-22 01:01:52 +00:00
|
|
|
for (int level = 0; level < version->NumberLevels(); level++) {
|
2014-01-22 19:44:53 +00:00
|
|
|
for (const auto& file : version->files_[level]) {
|
2014-06-13 22:54:19 +00:00
|
|
|
if (file->fd.GetNumber() == number) {
|
2014-02-06 23:42:16 +00:00
|
|
|
*meta = file;
|
2014-01-10 23:12:34 +00:00
|
|
|
*filelevel = level;
|
2014-01-27 22:33:50 +00:00
|
|
|
*cfd = cfd_iter;
|
2014-01-10 23:12:34 +00:00
|
|
|
return Status::OK();
|
|
|
|
}
|
2013-08-22 21:32:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Status::NotFound("File not present in any level");
|
|
|
|
}
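// Lookup sketch (illustrative only): given a bare file number, recover the
// owning column family, level and metadata:
//
//   int level;
//   FileMetaData* meta;
//   ColumnFamilyData* cfd;
//   Status s = versions->GetMetadataForFile(number, &level, &meta, &cfd);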
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
|
2014-01-22 19:44:53 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2014-02-03 20:08:33 +00:00
|
|
|
for (int level = 0; level < cfd->NumberLevels(); level++) {
|
2014-01-29 21:28:50 +00:00
|
|
|
for (const auto& file : cfd->current()->files_[level]) {
|
2014-01-10 23:12:34 +00:00
|
|
|
LiveFileMetaData filemetadata;
|
2014-04-30 20:24:52 +00:00
|
|
|
filemetadata.column_family_name = cfd->GetName();
|
2014-07-02 16:54:20 +00:00
|
|
|
uint32_t path_id = file->fd.GetPathId();
|
|
|
|
if (path_id < options_->db_paths.size()) {
|
2014-07-14 22:34:30 +00:00
|
|
|
filemetadata.db_path = options_->db_paths[path_id].path;
|
2014-07-02 16:54:20 +00:00
|
|
|
} else {
|
|
|
|
assert(!options_->db_paths.empty());
|
2014-07-14 22:34:30 +00:00
|
|
|
filemetadata.db_path = options_->db_paths.back().path;
|
2014-07-02 16:54:20 +00:00
|
|
|
}
|
|
|
|
filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
|
2014-01-10 23:12:34 +00:00
|
|
|
filemetadata.level = level;
|
2014-06-13 22:54:19 +00:00
|
|
|
filemetadata.size = file->fd.GetFileSize();
|
2014-01-22 19:44:53 +00:00
|
|
|
filemetadata.smallestkey = file->smallest.user_key().ToString();
|
|
|
|
filemetadata.largestkey = file->largest.user_key().ToString();
|
|
|
|
filemetadata.smallest_seqno = file->smallest_seqno;
|
|
|
|
filemetadata.largest_seqno = file->largest_seqno;
|
2014-01-10 23:12:34 +00:00
|
|
|
metadata->push_back(filemetadata);
|
|
|
|
}
|
2013-08-22 21:32:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
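// Usage sketch (assumed caller, illustrative only): dump every live file.
// Since filemetadata.name is built with MakeTableFileName("", number), it
// starts with '/', so db_path + name forms a full path:
//
//   std::vector<LiveFileMetaData> metadata;
//   versions->GetLiveFilesMetaData(&metadata);
//   for (const auto& f : metadata) {
//     printf("%s%s level=%d size=%lu\n", f.db_path.c_str(), f.name.c_str(),
//            f.level, (unsigned long)f.size);
//   }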
|
|
|
|
|
2013-11-12 19:53:26 +00:00
|
|
|
void VersionSet::GetObsoleteFiles(std::vector<FileMetaData*>* files) {
|
2014-01-10 23:12:34 +00:00
|
|
|
files->insert(files->end(), obsolete_files_.begin(), obsolete_files_.end());
|
2013-11-08 23:23:46 +00:00
|
|
|
obsolete_files_.clear();
|
|
|
|
}
|
|
|
|
|
2014-01-10 23:12:34 +00:00
|
|
|
ColumnFamilyData* VersionSet::CreateColumnFamily(
|
|
|
|
const ColumnFamilyOptions& options, VersionEdit* edit) {
|
|
|
|
assert(edit->is_column_family_add_);
|
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
Version* dummy_versions = new Version(nullptr, this);
|
2014-01-22 19:44:53 +00:00
|
|
|
auto new_cfd = column_family_set_->CreateColumnFamily(
|
|
|
|
edit->column_family_name_, edit->column_family_, dummy_versions, options);
|
|
|
|
|
2014-03-18 21:23:47 +00:00
|
|
|
Version* v = new Version(new_cfd, this, current_version_number_++);
|
|
|
|
|
|
|
|
AppendVersion(new_cfd, v);
|
2014-01-24 22:30:28 +00:00
|
|
|
new_cfd->CreateNewMemtable();
|
2014-02-28 19:08:24 +00:00
|
|
|
new_cfd->SetLogNumber(edit->log_number_);
|
2014-01-10 23:12:34 +00:00
|
|
|
return new_cfd;
|
|
|
|
}
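// Creation sketch (mirrors the recovery path above; the name and id are
// hypothetical): a column-family add is always driven by a VersionEdit:
//
//   VersionEdit edit;
//   edit.AddColumnFamily("new_cf");
//   edit.SetColumnFamily(new_cf_id);
//   ColumnFamilyData* cfd =
//       CreateColumnFamily(ColumnFamilyOptions(options), &edit);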
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|