2017-05-10 21:54:35 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
|
|
|
|
#include <atomic>
|
|
|
|
#include <condition_variable>
|
2017-07-28 06:16:18 +00:00
|
|
|
#include <limits>
|
2017-05-10 21:54:35 +00:00
|
|
|
#include <list>
|
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
|
|
|
#include <string>
|
|
|
|
#include <thread>
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
#include <unordered_map>
|
2017-05-10 21:54:35 +00:00
|
|
|
#include <utility>
|
|
|
|
#include <vector>
|
|
|
|
|
2017-10-18 00:24:25 +00:00
|
|
|
#include "db/db_iter.h"
|
2017-05-10 21:54:35 +00:00
|
|
|
#include "rocksdb/compaction_filter.h"
|
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/listener.h"
|
|
|
|
#include "rocksdb/options.h"
|
2017-11-28 19:42:28 +00:00
|
|
|
#include "rocksdb/statistics.h"
|
2017-05-10 21:54:35 +00:00
|
|
|
#include "rocksdb/wal_filter.h"
|
|
|
|
#include "util/mpsc.h"
|
|
|
|
#include "util/mutexlock.h"
|
|
|
|
#include "util/timer_queue.h"
|
|
|
|
#include "utilities/blob_db/blob_db.h"
|
2017-10-13 21:36:36 +00:00
|
|
|
#include "utilities/blob_db/blob_file.h"
|
2017-05-10 21:54:35 +00:00
|
|
|
#include "utilities/blob_db/blob_log_format.h"
|
|
|
|
#include "utilities/blob_db/blob_log_reader.h"
|
|
|
|
#include "utilities/blob_db/blob_log_writer.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
class DBImpl;
|
|
|
|
class ColumnFamilyHandle;
|
|
|
|
class ColumnFamilyData;
|
|
|
|
struct FlushJobInfo;
|
|
|
|
|
|
|
|
namespace blob_db {
|
|
|
|
|
|
|
|
class BlobFile;
|
|
|
|
class BlobDBImpl;
|
|
|
|
|
|
|
|
class BlobDBFlushBeginListener : public EventListener {
|
|
|
|
public:
|
2017-12-11 20:01:22 +00:00
|
|
|
explicit BlobDBFlushBeginListener(BlobDBImpl* blob_db_impl)
|
|
|
|
: blob_db_impl_(blob_db_impl) {}
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
void OnFlushBegin(DB* db, const FlushJobInfo& info) override;
|
|
|
|
|
2017-12-11 20:01:22 +00:00
|
|
|
private:
|
|
|
|
BlobDBImpl* blob_db_impl_;
|
2017-05-10 21:54:35 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// this implements the callback from the WAL which ensures that the
|
|
|
|
// blob record is present in the blob log. If fsync/fdatasync in not
|
|
|
|
// happening on every write, there is the probability that keys in the
|
|
|
|
// blob log can lag the keys in blobs
|
2017-12-11 20:01:22 +00:00
|
|
|
// TODO(yiwu): implement the WAL filter.
|
2017-05-10 21:54:35 +00:00
|
|
|
class BlobReconcileWalFilter : public WalFilter {
|
|
|
|
public:
|
|
|
|
virtual WalFilter::WalProcessingOption LogRecordFound(
|
|
|
|
unsigned long long log_number, const std::string& log_file_name,
|
|
|
|
const WriteBatch& batch, WriteBatch* new_batch,
|
|
|
|
bool* batch_changed) override;
|
|
|
|
|
|
|
|
virtual const char* Name() const override { return "BlobDBWalReconciler"; }
|
|
|
|
};
|
|
|
|
|
|
|
|
class EvictAllVersionsCompactionListener : public EventListener {
|
|
|
|
public:
|
|
|
|
class InternalListener : public CompactionEventListener {
|
|
|
|
friend class BlobDBImpl;
|
|
|
|
|
|
|
|
public:
|
2017-12-11 20:01:22 +00:00
|
|
|
explicit InternalListener(BlobDBImpl* blob_db_impl) : impl_(blob_db_impl) {}
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
virtual void OnCompaction(int level, const Slice& key,
|
|
|
|
CompactionListenerValueType value_type,
|
|
|
|
const Slice& existing_value,
|
|
|
|
const SequenceNumber& sn, bool is_new) override;
|
|
|
|
|
|
|
|
private:
|
|
|
|
BlobDBImpl* impl_;
|
|
|
|
};
|
|
|
|
|
2017-12-11 20:01:22 +00:00
|
|
|
explicit EvictAllVersionsCompactionListener(BlobDBImpl* blob_db_impl)
|
|
|
|
: internal_listener_(new InternalListener(blob_db_impl)) {}
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
virtual CompactionEventListener* GetCompactionEventListener() override {
|
|
|
|
return internal_listener_.get();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::unique_ptr<InternalListener> internal_listener_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Comparator to sort "TTL" aware Blob files based on the lower value of
|
|
|
|
// TTL range.
|
|
|
|
struct blobf_compare_ttl {
|
|
|
|
bool operator()(const std::shared_ptr<BlobFile>& lhs,
|
|
|
|
const std::shared_ptr<BlobFile>& rhs) const;
|
|
|
|
};
|
|
|
|
|
2017-07-28 06:16:18 +00:00
|
|
|
// Counters accumulated during one garbage-collection pass over a blob
// file (filled in by BlobDBImpl::GCFileAndUpdateLSM). The num_keys_*
// and bytes_* pairs track the same events by count and by size.
struct GCStats {
  // Total number of blob records visited.
  uint64_t blob_count = 0;
  uint64_t num_keys_overwritten = 0;
  uint64_t num_keys_expired = 0;
  uint64_t num_keys_relocated = 0;
  uint64_t bytes_overwritten = 0;
  uint64_t bytes_expired = 0;
  uint64_t bytes_relocated = 0;
};
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
/**
|
|
|
|
* The implementation class for BlobDB. This manages the value
|
|
|
|
* part in TTL aware sequentially written files. These files are
|
|
|
|
* Garbage Collected.
|
|
|
|
*/
|
|
|
|
class BlobDBImpl : public BlobDB {
|
|
|
|
friend class EvictAllVersionsCompactionListener;
|
|
|
|
friend class BlobFile;
|
|
|
|
friend class BlobDBIterator;
|
|
|
|
|
|
|
|
public:
|
2017-08-01 19:48:22 +00:00
|
|
|
// deletions check period
|
|
|
|
static constexpr uint32_t kDeleteCheckPeriodMillisecs = 2 * 1000;
|
|
|
|
|
|
|
|
// gc percentage each check period
|
|
|
|
static constexpr uint32_t kGCFilePercentage = 100;
|
|
|
|
|
|
|
|
// gc period
|
|
|
|
static constexpr uint32_t kGCCheckPeriodMillisecs = 60 * 1000;
|
|
|
|
|
|
|
|
// sanity check task
|
|
|
|
static constexpr uint32_t kSanityCheckPeriodMillisecs = 20 * 60 * 1000;
|
|
|
|
|
|
|
|
// how many random access open files can we tolerate
|
|
|
|
static constexpr uint32_t kOpenFilesTrigger = 100;
|
|
|
|
|
|
|
|
// how many periods of stats do we keep.
|
|
|
|
static constexpr uint32_t kWriteAmplificationStatsPeriods = 24;
|
|
|
|
|
|
|
|
// we will garbage collect blob files in
|
|
|
|
// which entire files have expired. However if the
|
|
|
|
// ttl_range of files is very large say a day, we
|
|
|
|
// would have to wait for the entire day, before we
|
|
|
|
// recover most of the space.
|
|
|
|
static constexpr uint32_t kPartialExpirationGCRangeSecs = 4 * 3600;
|
|
|
|
|
|
|
|
// this should be based on allowed Write Amplification
|
|
|
|
// if 50% of the space of a blob file has been deleted/expired,
|
|
|
|
static constexpr uint32_t kPartialExpirationPercentage = 75;
|
|
|
|
|
|
|
|
// how often should we schedule a job to fsync open files
|
|
|
|
static constexpr uint32_t kFSyncFilesPeriodMillisecs = 10 * 1000;
|
|
|
|
|
|
|
|
// how often to schedule reclaim open files.
|
|
|
|
static constexpr uint32_t kReclaimOpenFilesPeriodMillisecs = 1 * 1000;
|
|
|
|
|
|
|
|
// how often to schedule delete obs files periods
|
2017-08-21 01:12:38 +00:00
|
|
|
static constexpr uint32_t kDeleteObsoleteFilesPeriodMillisecs = 10 * 1000;
|
2017-08-01 19:48:22 +00:00
|
|
|
|
|
|
|
// how often to schedule check seq files period
|
|
|
|
static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000;
|
|
|
|
|
2017-11-02 19:02:42 +00:00
|
|
|
// when should oldest file be evicted:
|
|
|
|
// on reaching 90% of blob_dir_size
|
|
|
|
static constexpr double kEvictOldestFileAtSize = 0.9;
|
|
|
|
|
2017-09-08 17:57:12 +00:00
|
|
|
using BlobDB::Put;
|
|
|
|
Status Put(const WriteOptions& options, const Slice& key,
|
|
|
|
const Slice& value) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-09-08 17:57:12 +00:00
|
|
|
using BlobDB::Delete;
|
|
|
|
Status Delete(const WriteOptions& options, const Slice& key) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-09-08 17:57:12 +00:00
|
|
|
using BlobDB::Get;
|
2017-08-21 01:12:38 +00:00
|
|
|
Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family,
|
2017-07-13 00:56:40 +00:00
|
|
|
const Slice& key, PinnableSlice* value) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-09-08 17:57:12 +00:00
|
|
|
using BlobDB::NewIterator;
|
|
|
|
virtual Iterator* NewIterator(const ReadOptions& read_options) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-10-18 00:24:25 +00:00
|
|
|
using BlobDB::NewIterators;
|
|
|
|
virtual Status NewIterators(
|
|
|
|
const ReadOptions& read_options,
|
|
|
|
const std::vector<ColumnFamilyHandle*>& column_families,
|
|
|
|
std::vector<Iterator*>* iterators) override {
|
|
|
|
return Status::NotSupported("Not implemented");
|
|
|
|
}
|
|
|
|
|
2017-09-08 17:57:12 +00:00
|
|
|
using BlobDB::MultiGet;
|
2017-05-10 21:54:35 +00:00
|
|
|
virtual std::vector<Status> MultiGet(
|
2017-08-21 01:12:38 +00:00
|
|
|
const ReadOptions& read_options,
|
2017-05-10 21:54:35 +00:00
|
|
|
const std::vector<Slice>& keys,
|
|
|
|
std::vector<std::string>* values) override;
|
|
|
|
|
|
|
|
virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
|
|
|
|
|
2017-10-10 02:44:39 +00:00
|
|
|
virtual Status GetLiveFiles(std::vector<std::string>&,
|
|
|
|
uint64_t* manifest_file_size,
|
|
|
|
bool flush_memtable = true) override;
|
|
|
|
virtual void GetLiveFilesMetaData(
|
|
|
|
std::vector<LiveFileMetaData>* ) override;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
using BlobDB::PutWithTTL;
|
2017-09-08 17:57:12 +00:00
|
|
|
Status PutWithTTL(const WriteOptions& options, const Slice& key,
|
2017-08-04 00:46:00 +00:00
|
|
|
const Slice& value, uint64_t ttl) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
using BlobDB::PutUntil;
|
2017-09-08 17:57:12 +00:00
|
|
|
Status PutUntil(const WriteOptions& options, const Slice& key,
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
const Slice& value, uint64_t expiration) override;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-06-14 20:08:54 +00:00
|
|
|
BlobDBOptions GetBlobDBOptions() const override;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
BlobDBImpl(const std::string& dbname, const BlobDBOptions& bdb_options,
|
2017-12-11 20:01:22 +00:00
|
|
|
const DBOptions& db_options,
|
|
|
|
const ColumnFamilyOptions& cf_options);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
~BlobDBImpl();
|
|
|
|
|
2017-12-11 20:01:22 +00:00
|
|
|
Status Open(std::vector<ColumnFamilyHandle*>* handles);
|
|
|
|
|
|
|
|
Status SyncBlobFiles();
|
|
|
|
|
2017-05-31 17:45:47 +00:00
|
|
|
#ifndef NDEBUG
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
Status TEST_GetBlobValue(const Slice& key, const Slice& index_entry,
|
|
|
|
PinnableSlice* value);
|
|
|
|
|
2017-07-28 06:16:18 +00:00
|
|
|
std::vector<std::shared_ptr<BlobFile>> TEST_GetBlobFiles() const;
|
|
|
|
|
2017-08-20 23:56:01 +00:00
|
|
|
std::vector<std::shared_ptr<BlobFile>> TEST_GetObsoleteFiles() const;
|
|
|
|
|
2017-08-25 17:40:25 +00:00
|
|
|
Status TEST_CloseBlobFile(std::shared_ptr<BlobFile>& bfile);
|
2017-07-28 06:16:18 +00:00
|
|
|
|
|
|
|
Status TEST_GCFileAndUpdateLSM(std::shared_ptr<BlobFile>& bfile,
|
|
|
|
GCStats* gc_stats);
|
2017-08-20 23:56:01 +00:00
|
|
|
|
|
|
|
void TEST_RunGC();
|
2017-08-21 01:12:38 +00:00
|
|
|
|
|
|
|
void TEST_DeleteObsoleteFiles();
|
2017-05-31 17:45:47 +00:00
|
|
|
#endif // !NDEBUG
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
private:
|
2017-10-18 00:24:25 +00:00
|
|
|
class GarbageCollectionWriteCallback;
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
class BlobInserter;
|
2017-10-18 00:24:25 +00:00
|
|
|
|
2017-08-21 01:12:38 +00:00
|
|
|
// Create a snapshot if there isn't one in read options.
|
|
|
|
// Return true if a snapshot is created.
|
|
|
|
bool SetSnapshotIfNeeded(ReadOptions* read_options);
|
|
|
|
|
2017-11-28 19:42:28 +00:00
|
|
|
Status GetImpl(const ReadOptions& read_options,
|
|
|
|
ColumnFamilyHandle* column_family, const Slice& key,
|
|
|
|
PinnableSlice* value);
|
|
|
|
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
Status GetBlobValue(const Slice& key, const Slice& index_entry,
|
|
|
|
PinnableSlice* value);
|
|
|
|
|
2017-06-14 20:44:36 +00:00
|
|
|
Slice GetCompressedSlice(const Slice& raw,
|
|
|
|
std::string* compression_output) const;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
// is this file ready for Garbage collection. if the TTL of the file
|
|
|
|
// has expired or if threshold of the file has been evicted
|
|
|
|
// tt - current time
|
|
|
|
// last_id - the id of the non-TTL file to evict
|
2017-08-04 00:46:00 +00:00
|
|
|
bool ShouldGCFile(std::shared_ptr<BlobFile> bfile, uint64_t now,
|
2017-10-31 23:33:55 +00:00
|
|
|
bool is_oldest_non_ttl_file, std::string* reason);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-08-25 17:40:25 +00:00
|
|
|
// Close a file by appending a footer, and removes file from open files list.
|
|
|
|
Status CloseBlobFile(std::shared_ptr<BlobFile> bfile);
|
|
|
|
|
|
|
|
// Close a file if its size exceeds blob_file_size
|
|
|
|
Status CloseBlobFileIfNeeded(std::shared_ptr<BlobFile>& bfile);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-08-04 00:46:00 +00:00
|
|
|
uint64_t ExtractExpiration(const Slice& key, const Slice& value,
|
|
|
|
Slice* value_slice, std::string* new_value);
|
2017-07-28 06:16:18 +00:00
|
|
|
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
Status PutBlobValue(const WriteOptions& options, const Slice& key,
|
|
|
|
const Slice& value, uint64_t expiration,
|
2017-12-15 21:18:32 +00:00
|
|
|
WriteBatch* batch);
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
Status AppendBlob(const std::shared_ptr<BlobFile>& bfile,
|
|
|
|
const std::string& headerbuf, const Slice& key,
|
Blob DB: Inline small values in base DB
Summary:
Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values.
Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches.
See blob_index.h for the new blob index format. There are 4 cases when writing a new key:
* small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue)
* small value w/ TTL: put (type, expiration, value) to base db.
* large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db.
* large value w/TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db.
Closes https://github.com/facebook/rocksdb/pull/3066
Differential Revision: D6142115
Pulled By: yiwu-arbug
fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0
2017-10-26 19:19:43 +00:00
|
|
|
const Slice& value, uint64_t expiration,
|
|
|
|
std::string* index_entry);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// find an existing blob log file based on the expiration unix epoch
|
|
|
|
// if such a file does not exist, return nullptr
|
2017-08-04 00:46:00 +00:00
|
|
|
std::shared_ptr<BlobFile> SelectBlobFileTTL(uint64_t expiration);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// find an existing blob log file to append the value to
|
|
|
|
std::shared_ptr<BlobFile> SelectBlobFile();
|
|
|
|
|
2017-08-04 00:46:00 +00:00
|
|
|
std::shared_ptr<BlobFile> FindBlobFileLocked(uint64_t expiration) const;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
void Shutdown();
|
|
|
|
|
|
|
|
// periodic sanity check. Bunch of checks
|
|
|
|
std::pair<bool, int64_t> SanityCheck(bool aborted);
|
|
|
|
|
|
|
|
// delete files which have been garbage collected and marked
|
|
|
|
// obsolete. Check whether any snapshots exist which refer to
|
|
|
|
// the same
|
2017-08-21 01:12:38 +00:00
|
|
|
std::pair<bool, int64_t> DeleteObsoleteFiles(bool aborted);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// Major task to garbage collect expired and deleted blobs
|
|
|
|
std::pair<bool, int64_t> RunGC(bool aborted);
|
|
|
|
|
|
|
|
// asynchronous task to fsync/fdatasync the open blob files
|
|
|
|
std::pair<bool, int64_t> FsyncFiles(bool aborted);
|
|
|
|
|
|
|
|
// periodically check if open blob files and their TTL's has expired
|
|
|
|
// if expired, close the sequential writer and make the file immutable
|
|
|
|
std::pair<bool, int64_t> CheckSeqFiles(bool aborted);
|
|
|
|
|
|
|
|
// if the number of open files, approaches ULIMIT's this
|
|
|
|
// task will close random readers, which are kept around for
|
|
|
|
// efficiency
|
|
|
|
std::pair<bool, int64_t> ReclaimOpenFiles(bool aborted);
|
|
|
|
|
|
|
|
// background task to do book-keeping of deleted keys
|
|
|
|
std::pair<bool, int64_t> EvictDeletions(bool aborted);
|
|
|
|
|
|
|
|
std::pair<bool, int64_t> EvictCompacted(bool aborted);
|
|
|
|
|
|
|
|
std::pair<bool, int64_t> RemoveTimerQ(TimerQueue* tq, bool aborted);
|
|
|
|
|
|
|
|
// Adds the background tasks to the timer queue
|
|
|
|
void StartBackgroundTasks();
|
|
|
|
|
|
|
|
// add a new Blob File
|
|
|
|
std::shared_ptr<BlobFile> NewBlobFile(const std::string& reason);
|
|
|
|
|
2017-12-11 20:01:22 +00:00
|
|
|
// collect all the blob log files from the blob directory
|
|
|
|
Status GetAllBlobFiles(std::set<uint64_t>* file_numbers);
|
|
|
|
|
|
|
|
// Open all blob files found in blob_dir.
|
|
|
|
Status OpenAllBlobFiles();
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// hold write mutex on file and call
|
|
|
|
// creates a Random Access reader for GET call
|
|
|
|
std::shared_ptr<RandomAccessFileReader> GetOrOpenRandomAccessReader(
|
|
|
|
const std::shared_ptr<BlobFile>& bfile, Env* env,
|
|
|
|
const EnvOptions& env_options);
|
|
|
|
|
|
|
|
// hold write mutex on file and call.
|
|
|
|
// Close the above Random Access reader
|
|
|
|
void CloseRandomAccessLocked(const std::shared_ptr<BlobFile>& bfile);
|
|
|
|
|
|
|
|
// hold write mutex on file and call
|
|
|
|
// creates a sequential (append) writer for this blobfile
|
|
|
|
Status CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile);
|
|
|
|
|
|
|
|
// returns a Writer object for the file. If writer is not
|
|
|
|
// already present, creates one. Needs Write Mutex to be held
|
|
|
|
std::shared_ptr<Writer> CheckOrCreateWriterLocked(
|
|
|
|
const std::shared_ptr<BlobFile>& bfile);
|
|
|
|
|
|
|
|
// Iterate through keys and values on Blob and write into
|
|
|
|
// separate file the remaining blobs and delete/update pointers
|
|
|
|
// in LSM atomically
|
|
|
|
Status GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr,
|
|
|
|
GCStats* gcstats);
|
|
|
|
|
|
|
|
// checks if there is no snapshot which is referencing the
|
|
|
|
// blobs
|
2017-11-02 22:50:30 +00:00
|
|
|
bool VisibleToActiveSnapshot(const std::shared_ptr<BlobFile>& file);
|
2017-05-10 21:54:35 +00:00
|
|
|
bool FileDeleteOk_SnapshotCheckLocked(const std::shared_ptr<BlobFile>& bfile);
|
|
|
|
|
|
|
|
bool MarkBlobDeleted(const Slice& key, const Slice& lsmValue);
|
|
|
|
|
|
|
|
bool FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size,
|
|
|
|
uint64_t blob_offset, uint64_t blob_size);
|
|
|
|
|
2017-11-02 19:02:42 +00:00
|
|
|
void CopyBlobFiles(
|
|
|
|
std::vector<std::shared_ptr<BlobFile>>* bfiles_copy,
|
|
|
|
std::function<bool(const std::shared_ptr<BlobFile>&)> predicate = {});
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
void FilterSubsetOfFiles(
|
|
|
|
const std::vector<std::shared_ptr<BlobFile>>& blob_files,
|
|
|
|
std::vector<std::shared_ptr<BlobFile>>* to_process, uint64_t epoch,
|
2017-08-20 23:56:01 +00:00
|
|
|
size_t files_to_collect);
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-07-28 06:16:18 +00:00
|
|
|
uint64_t EpochNow() { return env_->NowMicros() / 1000000; }
|
|
|
|
|
2017-11-02 19:02:42 +00:00
|
|
|
Status CheckSize(size_t blob_size);
|
|
|
|
|
|
|
|
std::shared_ptr<BlobFile> GetOldestBlobFile();
|
|
|
|
|
|
|
|
bool EvictOldestBlobFile();
|
|
|
|
|
2017-12-11 20:01:22 +00:00
|
|
|
// name of the database directory
|
|
|
|
std::string dbname_;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
// the base DB
|
|
|
|
DBImpl* db_impl_;
|
2017-07-28 06:16:18 +00:00
|
|
|
Env* env_;
|
|
|
|
TTLExtractor* ttl_extractor_;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// the options that govern the behavior of Blob Storage
|
2017-08-01 19:48:22 +00:00
|
|
|
BlobDBOptions bdb_options_;
|
2017-05-10 21:54:35 +00:00
|
|
|
DBOptions db_options_;
|
2017-12-11 20:01:22 +00:00
|
|
|
ColumnFamilyOptions cf_options_;
|
2017-05-10 21:54:35 +00:00
|
|
|
EnvOptions env_options_;
|
|
|
|
|
2017-11-28 19:42:28 +00:00
|
|
|
// Raw pointer of statistic. db_options_ has a shared_ptr to hold ownership.
|
|
|
|
Statistics* statistics_;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
// by default this is "blob_dir" under dbname_
|
|
|
|
// but can be configured
|
|
|
|
std::string blob_dir_;
|
|
|
|
|
|
|
|
// pointer to directory
|
|
|
|
std::unique_ptr<Directory> dir_ent_;
|
|
|
|
|
|
|
|
std::atomic<bool> dir_change_;
|
|
|
|
|
|
|
|
// Read Write Mutex, which protects all the data structures
|
|
|
|
// HEAVILY TRAFFICKED
|
2017-08-20 23:56:01 +00:00
|
|
|
mutable port::RWMutex mutex_;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
2017-08-03 22:07:01 +00:00
|
|
|
// Writers has to hold write_mutex_ before writing.
|
|
|
|
mutable port::Mutex write_mutex_;
|
|
|
|
|
2017-05-10 21:54:35 +00:00
|
|
|
// counter for blob file number
|
|
|
|
std::atomic<uint64_t> next_file_number_;
|
|
|
|
|
|
|
|
// entire metadata of all the BLOB files memory
|
2017-08-20 23:56:01 +00:00
|
|
|
std::map<uint64_t, std::shared_ptr<BlobFile>> blob_files_;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// epoch or version of the open files.
|
|
|
|
std::atomic<uint64_t> epoch_of_;
|
|
|
|
|
2017-10-31 23:33:55 +00:00
|
|
|
// opened non-TTL blob file.
|
|
|
|
std::shared_ptr<BlobFile> open_non_ttl_file_;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// all the blob files which are currently being appended to based
|
|
|
|
// on variety of incoming TTL's
|
2017-10-31 23:33:55 +00:00
|
|
|
std::multiset<std::shared_ptr<BlobFile>, blobf_compare_ttl> open_ttl_files_;
|
2017-05-10 21:54:35 +00:00
|
|
|
|
|
|
|
// packet of information to put in lockess delete(s) queue
|
|
|
|
struct delete_packet_t {
|
|
|
|
ColumnFamilyHandle* cfh_;
|
|
|
|
std::string key_;
|
|
|
|
SequenceNumber dsn_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct override_packet_t {
|
|
|
|
uint64_t file_number_;
|
|
|
|
uint64_t key_size_;
|
|
|
|
uint64_t blob_offset_;
|
|
|
|
uint64_t blob_size_;
|
|
|
|
SequenceNumber dsn_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// LOCKLESS multiple producer single consumer queue to quickly append
|
|
|
|
// deletes without taking lock. Can rapidly grow in size!!
|
|
|
|
// deletes happen in LSM, but minor book-keeping needs to happen on
|
|
|
|
// BLOB side (for triggering eviction)
|
|
|
|
mpsc_queue_t<delete_packet_t> delete_keys_q_;
|
|
|
|
|
|
|
|
// LOCKLESS multiple producer single consumer queue for values
|
|
|
|
// that are being compacted
|
|
|
|
mpsc_queue_t<override_packet_t> override_vals_q_;
|
|
|
|
|
|
|
|
// atomic bool to represent shutdown
|
|
|
|
std::atomic<bool> shutdown_;
|
|
|
|
|
|
|
|
// timer based queue to execute tasks
|
|
|
|
TimerQueue tqueue_;
|
|
|
|
|
|
|
|
// only accessed in GC thread, hence not atomic. The epoch of the
|
|
|
|
// GC task. Each execution is one epoch. Helps us in allocating
|
|
|
|
// files to one execution
|
|
|
|
uint64_t current_epoch_;
|
|
|
|
|
|
|
|
// number of files opened for random access/GET
|
|
|
|
// counter is used to monitor and close excess RA files.
|
|
|
|
std::atomic<uint32_t> open_file_count_;
|
|
|
|
|
|
|
|
// total size of all blob files at a given time
|
|
|
|
std::atomic<uint64_t> total_blob_space_;
|
|
|
|
std::list<std::shared_ptr<BlobFile>> obsolete_files_;
|
|
|
|
bool open_p1_done_;
|
|
|
|
|
|
|
|
uint32_t debug_level_;
|
2017-11-02 19:02:42 +00:00
|
|
|
|
|
|
|
std::atomic<bool> oldest_file_evicted_;
|
2017-05-10 21:54:35 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace blob_db
|
|
|
|
} // namespace rocksdb
|
|
|
|
#endif // ROCKSDB_LITE
|