Merge branch 'performance' of github.com:facebook/rocksdb into performance

Dhruba Borthakur 2013-08-12 10:31:21 -07:00
commit 03bd4461ad
5 changed files with 1200 additions and 900 deletions


@@ -1238,13 +1238,17 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
manual.level = level;
manual.done = false;
manual.in_progress = false;
if (begin == nullptr) {
// For universal compaction, we enforce every manual compaction to compact
// all files.
if (begin == nullptr ||
options_.compaction_style == kCompactionStyleUniversal) {
manual.begin = nullptr;
} else {
begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
manual.begin = &begin_storage;
}
if (end == nullptr) {
if (end == nullptr ||
options_.compaction_style == kCompactionStyleUniversal) {
manual.end = nullptr;
} else {
end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
@@ -1498,6 +1502,18 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
if (!status.ok()) {
m->done = true;
}
// For universal compaction:
// Universal compaction always happens at level 0, so a single
// compaction picks up all overlapping files. No files are
// filtered out due to size limits and left for a successive
// compaction, so we can safely conclude the current compaction.
//
// Also note that, if we did not stop here, the current compaction
// would write a new file back to level 0, which would be picked up
// by the next compaction. Hence the manual compaction would never
// finish.
if (options_.compaction_style == kCompactionStyleUniversal) {
m->done = true;
}
if (!m->done) {
// We only compacted part of the requested range. Update *m
// to the range that is left to be compacted.
@@ -1745,14 +1761,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
}
// Is this compaction producing files at the bottommost level?
bool bottommost_level = true;
for (int i = compact->compaction->level() + 2;
i < versions_->NumberLevels(); i++) {
if (versions_->NumLevelFiles(i) > 0) {
bottommost_level = false;
break;
}
}
bool bottommost_level = compact->compaction->BottomMostLevel();
// Allocate the output file numbers before we release the lock
AllocateCompactionOutputFileNumbers(compact);
@@ -2088,7 +2097,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
versions_->LevelSummary(&tmp),
(stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
(double) stats.micros,
compact->compaction->level() + 1,
compact->compaction->output_level(),
stats.files_in_leveln, stats.files_in_levelnp1, stats.files_out_levelnp1,
stats.bytes_readn / 1048576.0,
stats.bytes_readnp1 / 1048576.0,

File diff suppressed because it is too large


@@ -2266,6 +2266,13 @@ Compaction* VersionSet::PickCompactionUniversal(int level, double score) {
newerfile = f;
}
// Is the earliest file part of this compaction?
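// The last entry of file_by_time refers to the earliest (oldest) file.
// If this compaction's inputs reach that file, the output will contain
// the oldest data, which plays the role of the bottommost level for
// universal compaction.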
int last_index = file_by_time[file_by_time.size()-1];
FileMetaData* last_file = current_->files_[level][last_index];
if (c->inputs_[0][c->inputs_[0].size()-1] == last_file) {
c->bottommost_level_ = true;
}
// update statistics
if (options_->statistics != nullptr) {
options_->statistics->measureTime(NUM_FILES_IN_SINGLE_COMPACTION,
@@ -2403,7 +2410,7 @@ Compaction* VersionSet::PickCompaction() {
if (level != 0 || compactions_in_progress_[0].empty()) {
if(!ParentRangeInCompaction(&f->smallest, &f->largest, level,
&parent_index)) {
c = new Compaction(level, level, MaxFileSizeForLevel(level+1),
c = new Compaction(level, level+1, MaxFileSizeForLevel(level+1),
MaxGrandParentOverlapBytes(level), NumberLevels(), true);
c->inputs_[0].push_back(f);
c->parent_index_ = parent_index;
@@ -2444,13 +2451,15 @@ Compaction* VersionSet::PickCompaction() {
assert(!c->inputs_[0].empty());
}
// Setup "level+1" files (inputs_[1])
SetupOtherInputs(c);
// mark all the files that are being compacted
c->MarkFilesBeingCompacted(true);
// Is this compaction creating a file at the bottommost level?
c->SetupBottomMostLevel(false);
// remember this currently undergoing compaction
compactions_in_progress_[level].insert(c);
@@ -2624,6 +2633,13 @@ Compaction* VersionSet::CompactRange(
const InternalKey* begin,
const InternalKey* end) {
std::vector<FileMetaData*> inputs;
// All files are 'overlapping' in universal style compaction.
// We have to compact the entire range in one shot.
if (options_->compaction_style == kCompactionStyleUniversal) {
begin = nullptr;
end = nullptr;
}
current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) {
return nullptr;
@@ -2667,6 +2683,9 @@ Compaction* VersionSet::CompactRange(
// upon other files because manual compactions are processed when
// the system has a max of 1 background compaction thread.
c->MarkFilesBeingCompacted(true);
// Is this compaction creating a file at the bottommost level?
c->SetupBottomMostLevel(true);
return c;
}
@@ -2686,6 +2705,7 @@ Compaction::Compaction(int level, int out_level, uint64_t target_file_size,
base_index_(-1),
parent_index_(-1),
score_(0),
bottommost_level_(false),
level_ptrs_(std::vector<size_t>(number_levels)) {
edit_ = new VersionEdit(number_levels_);
for (int i = 0; i < number_levels_; i++) {
@@ -2718,6 +2738,10 @@ void Compaction::AddInputDeletions(VersionEdit* edit) {
}
bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
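// In universal compaction all files live in level 0, so there are no
// deeper levels to scan; a key has no older versions outside this
// compaction only if the compaction also includes the earliest file,
// which is what bottommost_level_ records.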
if (input_version_->vset_->options_->compaction_style ==
kCompactionStyleUniversal) {
return bottommost_level_;
}
// Maybe use binary search to find right entry instead of linear search?
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
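// The output of this compaction goes to level_+1, so only deeper levels
// (level_+2 and beyond) can hold older versions of the key.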
for (int lvl = level_ + 2; lvl < number_levels_; lvl++) {
@@ -2776,6 +2800,31 @@ void Compaction::MarkFilesBeingCompacted(bool value) {
}
}
// Is this compaction producing files at the bottommost level?
void Compaction::SetupBottomMostLevel(bool isManual) {
if (input_version_->vset_->options_->compaction_style ==
kCompactionStyleUniversal) {
// If universal compaction style is used and manual
// compaction is occurring, then we are guaranteed that
// all files will be picked in a single compaction
// run. We can safely set bottommost_level_ = true.
// If it is not manual compaction, then bottommost_level_
// is already set when the Compaction was created.
if (isManual) {
bottommost_level_ = true;
}
return;
}
bottommost_level_ = true;
int num_levels = input_version_->vset_->NumberLevels();
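// The output of a level-style compaction goes to level()+1, so the
// result is at the bottommost level only if every deeper level
// (level()+2 and beyond) is empty.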
for (int i = level() + 2; i < num_levels; i++) {
if (input_version_->vset_->NumLevelFiles(i) > 0) {
bottommost_level_ = false;
break;
}
}
}
void Compaction::ReleaseInputs() {
if (input_version_ != nullptr) {
input_version_->Unref();


@@ -557,6 +557,9 @@ class Compaction {
// Return the score that was used to pick this compaction run.
double score() const { return score_; }
// Is this compaction creating a file in the bottommost level?
bool BottomMostLevel() { return bottommost_level_; }
private:
friend class Version;
friend class VersionSet;
@@ -589,7 +592,8 @@
int parent_index_; // index of some file with same range in files_[level_+1]
double score_; // score that was used to pick this compaction.
// State for implementing IsBaseLevelForKey
// Is this compaction creating a file in the bottommost level?
bool bottommost_level_;
// level_ptrs_ holds indices into input_version_->levels_: our state
// is that we are positioned at one of the file ranges for each
@@ -600,6 +604,9 @@
// mark (or clear) all files that are being compacted
void MarkFilesBeingCompacted(bool);
// Initialize whether this compaction produces files at the bottommost level
void SetupBottomMostLevel(bool isManual);
// In case of compaction error, reset the nextIndex that is used
// to pick up the next file to be compacted from files_by_size_
void ResetNextCompactionIndex();


@@ -185,7 +185,7 @@ Options::Dump(Logger* log) const
max_background_compactions);
Log(log," Options.hard_rate_limit: %.2f",
hard_rate_limit);
Log(log," Options.rate_limit_delay_max_milliseconds: %d",
Log(log," Options.rate_limit_delay_max_milliseconds: %u",
rate_limit_delay_max_milliseconds);
Log(log," Options.disable_auto_compactions: %d",
disable_auto_compactions);
@@ -205,7 +205,7 @@ Options::Dump(Logger* log) const
is_fd_close_on_exec);
Log(log," Options.skip_log_error_on_recovery: %d",
skip_log_error_on_recovery);
Log(log," Options.stats_dump_period_sec: %d",
Log(log," Options.stats_dump_period_sec: %u",
stats_dump_period_sec);
Log(log," Options.block_size_deviation: %d",
block_size_deviation);
@@ -221,11 +221,11 @@ Options::Dump(Logger* log) const
filter_deletes);
Log(log," Options.compaction_style: %d",
compaction_style);
Log(log," Options.compaction_options_universal.size_ratio: %d",
Log(log," Options.compaction_options_universal.size_ratio: %u",
compaction_options_universal.size_ratio);
Log(log," Options.compaction_options_universal.min_merge_width: %d",
Log(log," Options.compaction_options_universal.min_merge_width: %u",
compaction_options_universal.min_merge_width);
Log(log," Options.compaction_options_universal.max_merge_width: %d",
Log(log," Options.compaction_options_universal.max_merge_width: %u",
compaction_options_universal.max_merge_width);
} // Options::Dump