// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <unordered_map>
#include <string>
#include <vector>
#include <atomic>

#include "rocksdb/options.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "db/memtable_list.h"
#include "db/write_batch_internal.h"
#include "db/write_controller.h"
#include "db/table_cache.h"
#include "util/thread_local.h"
#include "db/flush_scheduler.h"
#include "util/mutable_cf_options.h"

namespace rocksdb {

class Version;
class VersionSet;
class MemTable;
class MemTableListVersion;
class CompactionPicker;
class Compaction;
class InternalKey;
class InternalStats;
class ColumnFamilyData;
class DBImpl;
class LogBuffer;

// ColumnFamilyHandleImpl is the class that clients use to access different
// column families. It has a non-trivial destructor, which gets called when
// the client is done using the column family.
class ColumnFamilyHandleImpl : public ColumnFamilyHandle {
 public:
  // create while holding the mutex
  ColumnFamilyHandleImpl(ColumnFamilyData* cfd, DBImpl* db, port::Mutex* mutex);
  // destroy without mutex
  virtual ~ColumnFamilyHandleImpl();
  virtual ColumnFamilyData* cfd() const { return cfd_; }
  virtual const Comparator* user_comparator() const;

  virtual uint32_t GetID() const;
  virtual const std::string& GetName() const override;

 private:
  ColumnFamilyData* cfd_;
  DBImpl* db_;
  port::Mutex* mutex_;
};

// Does not ref-count ColumnFamilyData
// We use this dummy ColumnFamilyHandleImpl because sometimes MemTableInserter
// calls DBImpl methods. When this happens, MemTableInserter needs access to
// a ColumnFamilyHandle (same as the client would need). In that case, we feed
// MemTableInserter a dummy ColumnFamilyHandle and enable it to call DBImpl
// methods.
class ColumnFamilyHandleInternal : public ColumnFamilyHandleImpl {
 public:
  ColumnFamilyHandleInternal()
      : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr) {}

  void SetCFD(ColumnFamilyData* _cfd) { internal_cfd_ = _cfd; }
  virtual ColumnFamilyData* cfd() const override { return internal_cfd_; }

 private:
  ColumnFamilyData* internal_cfd_;
};

// holds references to memtable, all immutable memtables and version
struct SuperVersion {
  MemTable* mem;
  MemTableListVersion* imm;
  Version* current;
  MutableCFOptions mutable_cf_options;
  std::atomic<uint32_t> refs;
  // We need to_delete because during Cleanup(), imm->Unref() returns
  // all memtables that we need to free through this vector. We then
  // delete all those memtables outside of the mutex, during destruction
  autovector<MemTable*> to_delete;
  // Version number of the current SuperVersion
  uint64_t version_number;
  port::Mutex* db_mutex;

  // should be called outside the mutex
  SuperVersion() = default;
  ~SuperVersion();
  SuperVersion* Ref();

  bool Unref();

  // call these two methods with db mutex held
  // Cleanup unrefs mem, imm and current. Also, it stores all memtables
  // that need to be deleted in the to_delete vector. Unrefing those
  // objects needs to be done inside the mutex
  void Cleanup();
  void Init(MemTable* new_mem, MemTableListVersion* new_imm,
            Version* new_current);

  // The value of dummy is not actually used. kSVInUse takes its address as a
  // mark in the thread local storage to indicate the SuperVersion is in use
  // by the thread. This way, the value of kSVInUse is guaranteed to have no
  // conflict with any SuperVersion object address, and is portable across
  // different platforms.
  static int dummy;
  static void* const kSVInUse;
  static void* const kSVObsolete;
};
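
// Summary of the scheme above (an editorial recap, not additional API): the
// per-thread slot managed by ColumnFamilyData (local_sv_, declared below) can
// hold one of three values:
//   SuperVersion::kSVObsolete -- no SuperVersion is cached, or the cached one
//                                was invalidated by InstallSuperVersion()
//   SuperVersion::kSVInUse    -- the owning thread is currently reading
//                                through its cached copy
//   any other value           -- a valid SuperVersion* cached for reuse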

extern ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
                                           const ColumnFamilyOptions& src);

class ColumnFamilySet;

// This class keeps all the data that a column family needs.
// Most methods require DB mutex held, unless otherwise noted
class ColumnFamilyData {
 public:
  ~ColumnFamilyData();

  // thread-safe
  uint32_t GetID() const { return id_; }
  // thread-safe
  const std::string& GetName() const { return name_; }

  // Ref() can only be called while holding a DB mutex or during a
  // single-threaded write.
  void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
  // Unref decreases the reference count, but does not delete the object. It
  // returns true if the reference count dropped to zero; in that case, the
  // object can be deleted by the caller immediately, or later, by calling
  // FreeDeadColumnFamilies().
  // Unref() can only be called while holding a DB mutex
  bool Unref() {
    int old_refs = refs_.fetch_sub(1, std::memory_order_relaxed);
    assert(old_refs > 0);
    return old_refs == 1;
  }

  // SetDropped() can only be called under the following conditions:
  // 1) Holding a DB mutex,
  // 2) from a single-threaded write thread, AND
  // 3) from single-threaded VersionSet::LogAndApply()
  // After dropping a column family, no other operation on that column family
  // will be executed. All the files and memory will, however, be kept around
  // until the client drops the column family handle. That way, the client can
  // still access data from the dropped column family.
  // A column family can be dropped and still be alive. In that state:
  // *) Column family is not included in the iteration.
  // *) Compaction and flush are not executed on the dropped column family.
  // *) Client can continue reading from the column family. Writes will fail
  //    unless WriteOptions::ignore_missing_column_families is true
  // When the dropped column family is unreferenced, we:
  // *) delete all memory associated with that column family
  // *) delete all the files associated with that column family
  void SetDropped();
  bool IsDropped() const { return dropped_; }
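
  // A hypothetical drop sequence under the rules above (sketch only; the
  // authoritative flow is DBImpl::DropColumnFamily() plus handle destruction):
  //
  //   cfd->SetDropped();                 // write thread, DB mutex held
  //   // readers that hold a SuperVersion can still see the old data
  //   if (cfd->Unref()) { delete cfd; }  // once the last reference is gone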

  // thread-safe
  int NumberLevels() const { return ioptions_.num_levels; }

  void SetLogNumber(uint64_t log_number) { log_number_ = log_number; }
  uint64_t GetLogNumber() const { return log_number_; }

  // !!! To be deprecated! Please do not use this function anymore!
  const Options* options() const { return &options_; }

  // thread-safe
  const EnvOptions* soptions() const;
  const ImmutableCFOptions* ioptions() const { return &ioptions_; }
  // REQUIRES: DB mutex held
  // This returns the MutableCFOptions used by the current SuperVersion
  // You should use this API to reference MutableCFOptions most of the time.
  const MutableCFOptions* GetCurrentMutableCFOptions() const {
    return &(super_version_->mutable_cf_options);
  }
  // REQUIRES: DB mutex held
  // This returns the latest MutableCFOptions, which may not be in effect yet.
  const MutableCFOptions* GetLatestMutableCFOptions() const {
    return &mutable_cf_options_;
  }
#ifndef ROCKSDB_LITE
  // REQUIRES: DB mutex held
  Status SetOptions(
      const std::unordered_map<std::string, std::string>& options_map);
#endif  // ROCKSDB_LITE
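
  // Example options_map for SetOptions() (a sketch; the accepted keys are
  // the mutable options understood by the options parser):
  //
  //   std::unordered_map<std::string, std::string> new_options =
  //       {{"write_buffer_size", "131072"},
  //        {"disable_auto_compactions", "true"}};
  //   Status s = cfd->SetOptions(new_options);  // DB mutex held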

  InternalStats* internal_stats() { return internal_stats_.get(); }

  MemTableList* imm() { return &imm_; }
  MemTable* mem() { return mem_; }
  Version* current() { return current_; }
  Version* dummy_versions() { return dummy_versions_; }
  void SetCurrent(Version* current);
  MemTable* ConstructNewMemtable(const MutableCFOptions& mutable_cf_options);
  void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
  void CreateNewMemtable(const MutableCFOptions& mutable_cf_options);

  TableCache* table_cache() const { return table_cache_.get(); }

  // See documentation in compaction_picker.h
  // REQUIRES: DB mutex held
  bool NeedsCompaction() const;
  // REQUIRES: DB mutex held
  Compaction* PickCompaction(const MutableCFOptions& mutable_options,
                             LogBuffer* log_buffer);
  // REQUIRES: DB mutex held
  Compaction* CompactRange(
      const MutableCFOptions& mutable_cf_options,
      int input_level, int output_level, uint32_t output_path_id,
      const InternalKey* begin, const InternalKey* end,
      InternalKey** compaction_end);

  CompactionPicker* compaction_picker() { return compaction_picker_.get(); }
  // thread-safe
  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }
  // thread-safe
  const InternalKeyComparator& internal_comparator() const {
    return internal_comparator_;
  }

  SuperVersion* GetSuperVersion() { return super_version_; }
  // thread-safe
  // Return an already referenced SuperVersion to be used safely.
  SuperVersion* GetReferencedSuperVersion(port::Mutex* db_mutex);
  // thread-safe
  // Get SuperVersion stored in thread local storage. If it does not exist,
  // get a reference from the current SuperVersion.
  SuperVersion* GetThreadLocalSuperVersion(port::Mutex* db_mutex);
  // Try to return SuperVersion back to thread local storage. Return true on
  // success and false on failure. It fails when the thread local storage
  // contains anything other than the SuperVersion::kSVInUse flag.
  bool ReturnThreadLocalSuperVersion(SuperVersion* sv);
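
  // Typical read-path pattern (an illustrative sketch; the authoritative
  // call sites live in DBImpl):
  //
  //   SuperVersion* sv = cfd->GetThreadLocalSuperVersion(&db_mutex);
  //   // ... read from sv->mem, sv->imm and sv->current, mutex-free ...
  //   if (!cfd->ReturnThreadLocalSuperVersion(sv)) {
  //     // a new SuperVersion was installed meanwhile; drop our reference
  //     if (sv->Unref()) {
  //       db_mutex.Lock();
  //       sv->Cleanup();
  //       db_mutex.Unlock();
  //       delete sv;
  //     }
  //   }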

  // thread-safe
  uint64_t GetSuperVersionNumber() const {
    return super_version_number_.load();
  }
  // Returns a pointer to the previous SuperVersion if its reference count
  // has dropped to zero and it needs deletion, or nullptr otherwise.
  // Takes a pointer to an allocated SuperVersion as an argument, which
  // enables the clients to allocate the SuperVersion outside of the mutex.
  // IMPORTANT: Only call this from DBImpl::InstallSuperVersion()
  SuperVersion* InstallSuperVersion(SuperVersion* new_superversion,
                                    port::Mutex* db_mutex,
                                    const MutableCFOptions& mutable_cf_options);
  SuperVersion* InstallSuperVersion(SuperVersion* new_superversion,
                                    port::Mutex* db_mutex);

  void ResetThreadLocalSuperVersions();

  void NotifyOnFlushCompleted(
      DB* db, const std::string& file_path,
      bool triggered_flush_slowdown,
      bool triggered_flush_stop);

  // Protected by DB mutex
  void set_pending_flush(bool value) { pending_flush_ = value; }
  void set_pending_compaction(bool value) { pending_compaction_ = value; }
  bool pending_flush() { return pending_flush_; }
  bool pending_compaction() { return pending_compaction_; }
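
  // These flags back the push-model scheduling queues in DBImpl. A sketch of
  // the expected enqueue step (illustrative; the actual logic lives in
  // DBImpl's flush/compaction scheduling code):
  //
  //   if (!cfd->pending_flush() && cfd->imm()->IsFlushPending()) {
  //     cfd->Ref();
  //     cfd->set_pending_flush(true);
  //     flush_queue_.push_back(cfd);
  //   }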

 private:
  friend class ColumnFamilySet;
  ColumnFamilyData(uint32_t id, const std::string& name,
                   Version* dummy_versions, Cache* table_cache,
                   WriteBuffer* write_buffer,
                   const ColumnFamilyOptions& options,
                   const DBOptions* db_options, const EnvOptions& env_options,
                   ColumnFamilySet* column_family_set);

  // Recalculate some small conditions, which change only during a compaction,
  // the addition of a new memtable, and/or the recalculation of the
  // compaction score. These values are used in the DBImpl::MakeRoomForWrite
  // function to decide whether it needs to make a write stall
  void RecalculateWriteStallConditions(
      const MutableCFOptions& mutable_cf_options);

  uint32_t id_;
  const std::string name_;
  Version* dummy_versions_;  // Head of circular doubly-linked list of versions
  Version* current_;         // == dummy_versions->prev_

  std::atomic<int> refs_;  // outstanding references to ColumnFamilyData
  bool dropped_;           // true if client dropped it

  const InternalKeyComparator internal_comparator_;

  const Options options_;
  const ImmutableCFOptions ioptions_;
  MutableCFOptions mutable_cf_options_;

  std::unique_ptr<TableCache> table_cache_;

  std::unique_ptr<InternalStats> internal_stats_;

  WriteBuffer* write_buffer_;

  MemTable* mem_;
  MemTableList imm_;
  SuperVersion* super_version_;

  // An ordinal representing the current SuperVersion. Updated by
  // InstallSuperVersion(), i.e. incremented every time super_version_
  // changes.
  std::atomic<uint64_t> super_version_number_;

  // Thread's local copy of SuperVersion pointer
  // This needs to be destructed before mutex_
  std::unique_ptr<ThreadLocalPtr> local_sv_;

  // Pointers for a circular linked list. We use it to support iterations
  // that can be concurrent with writes
  ColumnFamilyData* next_;
  ColumnFamilyData* prev_;

  // This is the earliest log file number that contains data from this
  // Column Family. All earlier log files must be ignored and not
  // recovered from
  uint64_t log_number_;

  // An object that keeps all the compaction stats
  // and picks the next compaction
  std::unique_ptr<CompactionPicker> compaction_picker_;

  ColumnFamilySet* column_family_set_;

  std::unique_ptr<WriteControllerToken> write_controller_token_;

  // If true --> this ColumnFamily is currently present in DBImpl::flush_queue_
  bool pending_flush_;

  // If true --> this ColumnFamily is currently present in
  // DBImpl::compaction_queue_
  bool pending_compaction_;
};

// ColumnFamilySet has interesting thread-safety requirements
// * CreateColumnFamily() or RemoveColumnFamily() -- need to be protected by
// the DB mutex AND executed in the write thread.
// CreateColumnFamily() should ONLY be called from VersionSet::LogAndApply()
// AND the single-threaded write thread. It is also called during Recovery and
// in DumpManifest().
// RemoveColumnFamily() is only called from SetDropped(). The DB mutex needs to
// be held and it needs to be executed from the write thread. SetDropped() also
// guarantees that it will be called only from single-threaded LogAndApply(),
// but this condition is not that important.
// * Iteration -- hold the DB mutex, but you can release it in the body of the
// iteration. If you release the DB mutex in the body, reference the column
// family before the mutex and unreference after you unlock, since the column
// family might get dropped when the DB mutex is released (see the sketch
// below)
// * GetDefault() -- thread safe
// * GetColumnFamily() -- either inside of DB mutex or from a write thread
// * GetNextColumnFamilyID(), GetMaxColumnFamily(), UpdateMaxColumnFamily(),
// NumberOfColumnFamilies -- inside of DB mutex
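//
// A minimal iteration sketch that releases the DB mutex in the body
// (illustrative, assuming a DBImpl-style caller that owns mutex_):
//
//   for (auto cfd : *versions_->GetColumnFamilySet()) {
//     cfd->Ref();                  // keep it alive while unlocked
//     mutex_.Unlock();
//     // ... slow per-column-family work ...
//     mutex_.Lock();
//     if (cfd->Unref()) {
//       delete cfd;                // we held the last reference
//     }
//   }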
class ColumnFamilySet {
 public:
  // ColumnFamilySet supports iteration
  class iterator {
   public:
    explicit iterator(ColumnFamilyData* cfd)
        : current_(cfd) {}
    iterator& operator++() {
      // dummy is never dead or dropped, so this will never be infinite
      do {
        current_ = current_->next_;
      } while (current_->refs_.load(std::memory_order_relaxed) == 0 ||
               current_->IsDropped());
      return *this;
    }
    bool operator!=(const iterator& other) {
      return this->current_ != other.current_;
    }
    ColumnFamilyData* operator*() { return current_; }

   private:
    ColumnFamilyData* current_;
  };

  ColumnFamilySet(const std::string& dbname, const DBOptions* db_options,
                  const EnvOptions& env_options, Cache* table_cache,
                  WriteBuffer* write_buffer, WriteController* write_controller);
  ~ColumnFamilySet();

  ColumnFamilyData* GetDefault() const;
  // GetColumnFamily() calls return nullptr if column family is not found
  ColumnFamilyData* GetColumnFamily(uint32_t id) const;
  ColumnFamilyData* GetColumnFamily(const std::string& name) const;
  // This call will return the next available column family ID. It guarantees
  // that there is no column family with an ID greater than or equal to the
  // returned value in the currently running instance or at any time in the
  // RocksDB instance's history.
  uint32_t GetNextColumnFamilyID();
  uint32_t GetMaxColumnFamily();
  void UpdateMaxColumnFamily(uint32_t new_max_column_family);
  size_t NumberOfColumnFamilies() const;

  ColumnFamilyData* CreateColumnFamily(const std::string& name, uint32_t id,
                                       Version* dummy_version,
                                       const ColumnFamilyOptions& options);

  iterator begin() { return iterator(dummy_cfd_->next_); }
  iterator end() { return iterator(dummy_cfd_); }

  // REQUIRES: DB mutex held
  // Don't call while iterating over ColumnFamilySet
  void FreeDeadColumnFamilies();

 private:
  friend class ColumnFamilyData;
  // helper function that gets called from the cfd destructor
  // REQUIRES: DB mutex held
  void RemoveColumnFamily(ColumnFamilyData* cfd);

  // column_families_ and column_family_data_ need to be protected:
  // * when mutating, both conditions have to be satisfied:
  // 1. DB mutex locked
  // 2. thread currently in single-threaded write thread
  // * when reading, at least one condition needs to be satisfied:
  // 1. DB mutex locked
  // 2. accessed from a single-threaded write thread
  std::unordered_map<std::string, uint32_t> column_families_;
  std::unordered_map<uint32_t, ColumnFamilyData*> column_family_data_;

  uint32_t max_column_family_;
  ColumnFamilyData* dummy_cfd_;
  // We don't hold the refcount here, since the default column family always
  // exists. We are also not responsible for cleaning up default_cfd_cache_.
  // This is just a cache that makes the common case (accessing the default
  // column family) faster
  ColumnFamilyData* default_cfd_cache_;

  const std::string db_name_;
  const DBOptions* const db_options_;
  const EnvOptions env_options_;
  Cache* table_cache_;
  WriteBuffer* write_buffer_;
  WriteController* write_controller_;
};

// We use ColumnFamilyMemTablesImpl to provide WriteBatch a way to access
// memtables of different column families (specified by ID in the write batch)
class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables {
 public:
  explicit ColumnFamilyMemTablesImpl(ColumnFamilySet* column_family_set,
                                     FlushScheduler* flush_scheduler)
      : column_family_set_(column_family_set),
        current_(nullptr),
        flush_scheduler_(flush_scheduler) {}

  // Sets current_ to the ColumnFamilyData with column_family_id.
  // Returns false if the column family doesn't exist.
  // REQUIRES: under a DB mutex OR from a write thread
  bool Seek(uint32_t column_family_id) override;

  // Returns log number of the selected column family
  // REQUIRES: under a DB mutex OR from a write thread
  uint64_t GetLogNumber() const override;

  // REQUIRES: Seek() called first
  // REQUIRES: under a DB mutex OR from a write thread
  virtual MemTable* GetMemTable() const override;

  // Returns column family handle for the selected column family
  // REQUIRES: under a DB mutex OR from a write thread
  virtual ColumnFamilyHandle* GetColumnFamilyHandle() override;

  // REQUIRES: under a DB mutex OR from a write thread
  virtual void CheckMemtableFull() override;

 private:
  ColumnFamilySet* column_family_set_;
  ColumnFamilyData* current_;
  FlushScheduler* flush_scheduler_;
  ColumnFamilyHandleInternal handle_;
};
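
// How a write-batch replay is expected to use this class (an illustrative
// sketch; the real consumer is MemTableInserter in db/write_batch.cc):
//
//   ColumnFamilyMemTablesImpl cf_mems(column_family_set, &flush_scheduler);
//   if (cf_mems.Seek(column_family_id)) {
//     MemTable* mem = cf_mems.GetMemTable();
//     // ... apply the write-batch entry to mem ...
//     cf_mems.CheckMemtableFull();  // may schedule a flush if mem is full
//   }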

extern uint32_t GetColumnFamilyID(ColumnFamilyHandle* column_family);

extern const Comparator* GetColumnFamilyUserComparator(
    ColumnFamilyHandle* column_family);

}  // namespace rocksdb