mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-30 04:41:49 +00:00
3ce3bb3da2
Summary:
This diff updates the logic of how we do trivial move: a trivial move can now run on any number of files in the input level as long as they are not overlapping.

The conditions for trivial move have been updated.

Introduced conditions:
- Trivial move cannot happen if we have a compaction filter (except if the compaction is not manual)
- Input level files cannot be overlapping

Removed conditions:
- Trivial move only ran when the compaction was not manual
- Input level can contain only 1 file

More context on which tests failed because of trivial move:

```
DBTest.CompactionsGenerateMultipleFiles
This test expects compaction on a file in L0 to generate multiple files in L1; it will fail with trivial move because we end up with one file in L1.
```

```
DBTest.NoSpaceCompactRange
This test expects compaction to fail when we force the environment to report running out of space. This is not valid in a trivial move situation, because a trivial move does not need any extra space, and the test did not account for that.
```

```
DBTest.DropWrites
Similar to DBTest.NoSpaceCompactRange.
```

```
DBTest.DeleteObsoleteFilesPendingOutputs
This test expects that a file in L2 is deleted after it is moved to L3. This is not valid with trivial move because, although the file was moved, it is now used by L3.
```

```
CuckooTableDBTest.CompactionIntoMultipleFiles
Same as DBTest.CompactionsGenerateMultipleFiles.
```

This diff is based on work by @sdong https://reviews.facebook.net/D34149

Test Plan: make -j64 check

Reviewers: rven, sdong, igor

Reviewed By: igor

Subscribers: yhchiang, ott, march, dhruba, sdong

Differential Revision: https://reviews.facebook.net/D34797
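For illustration only (this sketch is not part of the diff): a minimal C++ example of the "input level files cannot be overlapping" precondition described above, assuming a hypothetical FileRange struct with plain-string user-key bounds and a file list already sorted by smallest key, as level files are. The real check would use FileMetaData key metadata and the column family's comparator rather than raw strings.

```
// Hypothetical sketch, not RocksDB code: adjacent files in a sorted list must
// have disjoint key ranges for a multi-file trivial move to be allowed.
#include <string>
#include <vector>

struct FileRange {
  std::string smallest_user_key;
  std::string largest_user_key;
};

// Returns true iff no two files overlap, i.e. each file's largest key is
// strictly smaller than the next file's smallest key.
bool FilesAreNonOverlapping(const std::vector<FileRange>& files) {
  for (size_t i = 1; i < files.size(); ++i) {
    if (files[i - 1].largest_user_key >= files[i].smallest_user_key) {
      return false;  // shared key range: fall back to a normal compaction
    }
  }
  return true;
}
```

A compaction whose inputs pass this kind of check (and meet the other conditions listed above) can be applied by moving the files to the output level instead of rewriting them.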
256 lines
9.7 KiB
C++
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include "util/arena.h"
#include "util/autovector.h"
#include "util/mutable_cf_options.h"
#include "db/version_set.h"

namespace rocksdb {

// The structure that manages compaction input files associated
// with the same physical level.
struct CompactionInputFiles {
  int level;
  std::vector<FileMetaData*> files;
  inline bool empty() const { return files.empty(); }
  inline size_t size() const { return files.size(); }
  inline void clear() { files.clear(); }
  inline FileMetaData* operator[](size_t i) const { return files[i]; }
};

class Version;
class ColumnFamilyData;
class VersionStorageInfo;
class CompactionFilter;
class CompactionFilterV2;

// A Compaction encapsulates information about a compaction.
class Compaction {
 public:
  Compaction(VersionStorageInfo* input_version,
             const MutableCFOptions& mutable_cf_options,
             std::vector<CompactionInputFiles> inputs, int output_level,
             uint64_t target_file_size, uint64_t max_grandparent_overlap_bytes,
             uint32_t output_path_id, CompressionType compression,
             std::vector<FileMetaData*> grandparents,
             bool manual_compaction = false, double score = -1,
             bool deletion_compaction = false);

  // No copying allowed
  Compaction(const Compaction&) = delete;
  void operator=(const Compaction&) = delete;

  ~Compaction();

  // Returns the level associated with the specified compaction input level.
  // If compaction_input_level is not specified, then input_level is set to 0.
  int level(size_t compaction_input_level = 0) const {
    return inputs_[compaction_input_level].level;
  }

  int start_level() const { return start_level_; }

  // Outputs will go to this level
  int output_level() const { return output_level_; }

  // Returns the number of input levels in this compaction.
  size_t num_input_levels() const { return inputs_.size(); }

  // Return the object that holds the edits to the descriptor done
  // by this compaction.
  VersionEdit* edit() { return &edit_; }

  // Returns the number of input files associated with the specified
  // compaction input level.
  // The function will return 0 when "compaction_input_level" < 0
  // or "compaction_input_level" >= "num_input_levels()".
  size_t num_input_files(size_t compaction_input_level) const {
    if (compaction_input_level < inputs_.size()) {
      return inputs_[compaction_input_level].size();
    }
    return 0;
  }

  // Returns input version of the compaction
  Version* input_version() const { return input_version_; }

  // Returns the ColumnFamilyData associated with the compaction.
  ColumnFamilyData* column_family_data() const { return cfd_; }

  // Returns the file meta data of the 'i'th input file at the
  // specified compaction input level.
  // REQUIREMENT: "compaction_input_level" must be >= 0 and
  //              < "input_levels()"
  FileMetaData* input(size_t compaction_input_level, size_t i) const {
    assert(compaction_input_level < inputs_.size());
    return inputs_[compaction_input_level][i];
  }

  // Returns the list of file meta data of the specified compaction
  // input level.
  // REQUIREMENT: "compaction_input_level" must be >= 0 and
  //              < "input_levels()"
  const std::vector<FileMetaData*>* inputs(size_t compaction_input_level) {
    assert(compaction_input_level < inputs_.size());
    return &inputs_[compaction_input_level].files;
  }

  // Returns the LevelFilesBrief of the specified compaction input level.
  LevelFilesBrief* input_levels(size_t compaction_input_level) {
    return &input_levels_[compaction_input_level];
  }

  // Maximum size of files to build during this compaction.
  uint64_t MaxOutputFileSize() const { return max_output_file_size_; }

  // What compression type to use for the output files.
  CompressionType OutputCompressionType() const { return output_compression_; }

  // Whether the output files need to be written to a second DB path.
  uint32_t GetOutputPathId() const { return output_path_id_; }

  // Is this a trivial compaction that can be implemented by just
  // moving a single input file to the next level (no merging or splitting)
  bool IsTrivialMove() const;

  // If true, then the compaction can be done by simply deleting input files.
  bool IsDeletionCompaction() const {
    return deletion_compaction_;
  }

  // Add all inputs to this compaction as delete operations to *edit.
  void AddInputDeletions(VersionEdit* edit);

  // Returns true if the available information we have guarantees that
  // the input "user_key" does not exist in any level beyond "output_level()".
  bool KeyNotExistsBeyondOutputLevel(const Slice& user_key);

  // Returns true iff we should stop building the current output
  // before processing "internal_key".
  bool ShouldStopBefore(const Slice& internal_key);

  // Clear all files to indicate that they are not being compacted.
  // Delete this compaction from the list of running compactions.
  void ReleaseCompactionFiles(Status status);

  // Returns the summary of the compaction in "output" with maximum "len"
  // in bytes. The caller is responsible for the memory management of
  // "output".
  void Summary(char* output, int len);

  // Return the score that was used to pick this compaction run.
  double score() const { return score_; }

  // Is this compaction creating a file in the bottom most level?
  bool BottomMostLevel() { return bottommost_level_; }

  // Does this compaction include all sst files?
  bool IsFullCompaction() { return is_full_compaction_; }

  // Was this compaction triggered manually by the client?
  bool IsManualCompaction() { return is_manual_compaction_; }

  // Return the MutableCFOptions that should be used throughout the compaction
  // procedure
  const MutableCFOptions* mutable_cf_options() { return &mutable_cf_options_; }

  // Returns the size in bytes that the output file should be preallocated to.
  // In level compaction, that is max_file_size_. In universal compaction, that
  // is the sum of all input file sizes.
  uint64_t OutputFilePreallocationSize();

  void SetInputVersion(Version* input_version);

  struct InputLevelSummaryBuffer {
    char buffer[128];
  };

  const char* InputLevelSummary(InputLevelSummaryBuffer* scratch) const;

  uint64_t CalculateTotalInputSize() const;

  // In case of compaction error, reset the nextIndex that is used
  // to pick up the next file to be compacted from files_by_size_
  void ResetNextCompactionIndex();

  // Create a CompactionFilter from compaction_filter_factory
  std::unique_ptr<CompactionFilter> CreateCompactionFilter() const;

  // Create a CompactionFilterV2 from compaction_filter_factory_v2
  std::unique_ptr<CompactionFilterV2> CreateCompactionFilterV2() const;

 private:
  // mark (or clear) all files that are being compacted
  void MarkFilesBeingCompacted(bool mark_as_compacted);

  // helper function to determine if compaction with inputs and storage is
  // bottommost
  static bool IsBottommostLevel(
      int output_level, VersionStorageInfo* vstorage,
      const std::vector<CompactionInputFiles>& inputs);
  static bool IsFullCompaction(VersionStorageInfo* vstorage,
                               const std::vector<CompactionInputFiles>& inputs);

  const int start_level_;   // the lowest level to be compacted
  const int output_level_;  // level to which output files are stored
  uint64_t max_output_file_size_;
  uint64_t max_grandparent_overlap_bytes_;
  MutableCFOptions mutable_cf_options_;
  Version* input_version_;
  VersionEdit edit_;
  const int number_levels_;
  ColumnFamilyData* cfd_;
  Arena arena_;  // Arena used to allocate space for file_levels_

  const uint32_t output_path_id_;
  CompressionType output_compression_;
  // If true, then the compaction can be done by simply deleting input files.
  const bool deletion_compaction_;

  // Compaction input files organized by level. Constant after construction
  const std::vector<CompactionInputFiles> inputs_;

  // A copy of inputs_, organized more closely in memory
  autovector<LevelFilesBrief, 2> input_levels_;

  // State used to check for number of overlapping grandparent files
  // (grandparent == "output_level_ + 1")
  std::vector<FileMetaData*> grandparents_;
  size_t grandparent_index_;   // Index in grandparent_starts_
  bool seen_key_;              // Some output key has been seen
  uint64_t overlapped_bytes_;  // Bytes of overlap between current output
                               // and grandparent files
  const double score_;         // score that was used to pick this compaction.

  // Is this compaction creating a file in the bottom most level?
  const bool bottommost_level_;
  // Does this compaction include all sst files?
  const bool is_full_compaction_;

  // Is this compaction requested by the client?
  const bool is_manual_compaction_;

  // "level_ptrs_" holds indices into "input_version_->levels_", where each
  // index remembers which file of the corresponding level is currently being
  // used by KeyNotExistsBeyondOutputLevel() when checking for deleted keys.
  // Since it only serves KeyNotExistsBeyondOutputLevel(), it only records
  // indices for the levels beyond "output_level_".
  std::vector<size_t> level_ptrs_;

  // Does input compression match the output compression?
  bool InputCompressionMatchesOutput() const;
};

// Utility function
extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);

}  // namespace rocksdb
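For illustration only (not part of compaction.h): a hedged sketch of how a caller might walk every input file of a compaction through the accessors declared above, assuming this header is included and `c` points to a fully constructed Compaction. The helper name is hypothetical.

```
// Hypothetical usage sketch, not RocksDB code.
#include "db/compaction.h"

namespace rocksdb {

void ListCompactionInputs(const Compaction* c) {
  // Iterate the input levels, then the files within each level, staying
  // inside the bounds documented for num_input_files() and input().
  for (size_t which = 0; which < c->num_input_levels(); ++which) {
    for (size_t i = 0; i < c->num_input_files(which); ++i) {
      FileMetaData* f = c->input(which, i);
      (void)f;  // e.g. record f together with c->level(which)
    }
  }
}

}  // namespace rocksdb
```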