// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
|
|
|
|
|
2017-02-27 20:59:34 +00:00
|
|
|
|
2022-10-24 23:38:09 +00:00
|
|
|
#ifndef OS_WIN
|
2016-05-03 18:05:42 +00:00
|
|
|
#include <unistd.h>
|
2022-10-24 23:38:09 +00:00
|
|
|
#endif // ! OS_WIN
|
2016-08-03 00:15:18 +00:00
|
|
|
|
2017-05-22 17:21:38 +00:00
|
|
|
#include <atomic>
|
2016-05-03 18:05:42 +00:00
|
|
|
#include <list>
|
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
|
|
|
#include <sstream>
|
|
|
|
#include <stdexcept>
|
|
|
|
#include <string>
|
|
|
|
#include <thread>
|
|
|
|
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "memory/arena.h"
|
2017-04-06 20:59:31 +00:00
|
|
|
#include "memtable/skiplist.h"
|
2017-04-06 02:02:00 +00:00
|
|
|
#include "monitoring/histogram.h"
|
2016-08-03 00:15:18 +00:00
|
|
|
#include "port/port.h"
|
2021-01-26 06:07:26 +00:00
|
|
|
#include "rocksdb/cache.h"
|
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/persistent_cache.h"
|
|
|
|
#include "rocksdb/system_clock.h"
|
2016-05-03 18:05:42 +00:00
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/crc32c.h"
|
|
|
|
#include "util/mutexlock.h"
|
2021-01-26 06:07:26 +00:00
|
|
|
#include "utilities/persistent_cache/block_cache_tier_file.h"
|
|
|
|
#include "utilities/persistent_cache/block_cache_tier_metadata.h"
|
|
|
|
#include "utilities/persistent_cache/persistent_cache_util.h"
|
2016-05-03 18:05:42 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2016-05-03 18:05:42 +00:00
|
|
|
|
|
|
|
//
// Block cache tier implementation
//
class BlockCacheTier : public PersistentCacheTier {
|
|
|
|
public:
|
|
|
|
explicit BlockCacheTier(const PersistentCacheConfig& opt)
|
|
|
|
: opt_(opt),
|
2018-09-06 01:07:53 +00:00
|
|
|
insert_ops_(static_cast<size_t>(opt_.max_write_pipeline_backlog_size)),
|
2016-05-03 18:05:42 +00:00
|
|
|
buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
|
2022-10-24 23:38:09 +00:00
|
|
|
writer_(this, opt_.writer_qdepth,
|
|
|
|
static_cast<size_t>(opt_.writer_dispatch_size)) {
|
2019-04-04 19:05:42 +00:00
|
|
|
Info(opt_.log, "Initializing allocator. size=%d B count=%" ROCKSDB_PRIszt,
|
2016-05-03 18:05:42 +00:00
|
|
|
opt_.write_buffer_size, opt_.write_buffer_count());
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~BlockCacheTier() {
|
2016-09-30 01:02:13 +00:00
|
|
|
// Close is re-entrant so we can call close even if it is already closed
|
2020-09-16 22:45:30 +00:00
|
|
|
Close().PermitUncheckedError();
|
2016-05-03 18:05:42 +00:00
|
|
|
assert(!insert_th_.joinable());
|
|
|
|
}
|
|
|
|
|
|
|
|
Status Insert(const Slice& key, const char* data, const size_t size) override;
|
|
|
|
Status Lookup(const Slice& key, std::unique_ptr<char[]>* data,
|
|
|
|
size_t* size) override;
|
|
|
|
Status Open() override;
|
|
|
|
Status Close() override;
|
|
|
|
bool Erase(const Slice& key) override;
|
|
|
|
bool Reserve(const size_t size) override;
|
|
|
|
|
|
|
|
bool IsCompressed() override { return opt_.is_compressed; }
|
|
|
|
|
2016-12-19 22:00:04 +00:00
|
|
|
std::string GetPrintableOptions() const override { return opt_.ToString(); }
|
|
|
|
|
2016-11-22 01:22:01 +00:00
|
|
|
PersistentCache::StatsType Stats() override;
|
2016-05-03 18:05:42 +00:00
|
|
|
|
|
|
|
void TEST_Flush() override {
|
|
|
|
while (insert_ops_.Size()) {
|
2016-08-03 21:23:21 +00:00
|
|
|
/* sleep override */
|
2021-01-26 06:07:26 +00:00
|
|
|
SystemClock::Default()->SleepForMicroseconds(1000000);
|
2016-05-03 18:05:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
// Percentage of cache to be evicted when the cache is full
|
|
|
|
static const size_t kEvictPct = 10;
|
|
|
|
// Max attempts to insert key, value to cache in pipelined mode
|
|
|
|
static const size_t kMaxRetry = 3;
|
|
|
|
|
|
|
|
// Pipelined operation
|
|
|
|
struct InsertOp {
|
|
|
|
explicit InsertOp(const bool signal) : signal_(signal) {}
|
|
|
|
explicit InsertOp(std::string&& key, const std::string& data)
|
|
|
|
: key_(std::move(key)), data_(data) {}
|
|
|
|
~InsertOp() {}
|
|
|
|
|
|
|
|
InsertOp() = delete;
|
2018-04-13 00:55:14 +00:00
|
|
|
InsertOp(InsertOp&& /*rhs*/) = default;
|
2016-05-03 18:05:42 +00:00
|
|
|
InsertOp& operator=(InsertOp&& rhs) = default;
|
|
|
|
|
|
|
|
// used for estimating size by bounded queue
|
|
|
|
size_t Size() { return data_.size() + key_.size(); }
|
|
|
|
|
|
|
|
std::string key_;
|
|
|
|
std::string data_;
|
2019-04-05 22:16:15 +00:00
|
|
|
bool signal_ = false; // signal to request processing thread to exit
|
2016-05-03 18:05:42 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// entry point for insert thread
|
|
|
|
void InsertMain();
|
|
|
|
// insert implementation
|
|
|
|
Status InsertImpl(const Slice& key, const Slice& data);
|
|
|
|
// Create a new cache file
|
2016-11-22 18:26:08 +00:00
|
|
|
Status NewCacheFile();
|
2016-05-03 18:05:42 +00:00
|
|
|
// Get cache directory path
|
|
|
|
std::string GetCachePath() const { return opt_.path + "/cache"; }
|
|
|
|
// Cleanup folder
|
|
|
|
Status CleanupCacheFolder(const std::string& folder);
|
|
|
|
|
|
|
|
// Statistics
|
2016-11-22 01:22:01 +00:00
|
|
|
struct Statistics {
|
2016-05-03 18:05:42 +00:00
|
|
|
HistogramImpl bytes_pipelined_;
|
|
|
|
HistogramImpl bytes_written_;
|
|
|
|
HistogramImpl bytes_read_;
|
|
|
|
HistogramImpl read_hit_latency_;
|
|
|
|
HistogramImpl read_miss_latency_;
|
|
|
|
HistogramImpl write_latency_;
|
2017-05-22 17:21:38 +00:00
|
|
|
std::atomic<uint64_t> cache_hits_{0};
|
|
|
|
std::atomic<uint64_t> cache_misses_{0};
|
|
|
|
std::atomic<uint64_t> cache_errors_{0};
|
|
|
|
std::atomic<uint64_t> insert_dropped_{0};
|
2016-05-03 18:05:42 +00:00
|
|
|
|
|
|
|
double CacheHitPct() const {
|
|
|
|
const auto lookups = cache_hits_ + cache_misses_;
|
|
|
|
return lookups ? 100 * cache_hits_ / static_cast<double>(lookups) : 0.0;
|
|
|
|
}
|
|
|
|
|
|
|
|
double CacheMissPct() const {
|
|
|
|
const auto lookups = cache_hits_ + cache_misses_;
|
|
|
|
return lookups ? 100 * cache_misses_ / static_cast<double>(lookups) : 0.0;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
port::RWMutex lock_; // Synchronization
|
|
|
|
const PersistentCacheConfig opt_; // BlockCache options
|
|
|
|
BoundedQueue<InsertOp> insert_ops_; // Ops waiting for insert
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::Thread insert_th_; // Insert thread
|
2016-05-03 18:05:42 +00:00
|
|
|
uint32_t writer_cache_id_ = 0; // Current cache file identifier
|
|
|
|
WriteableCacheFile* cache_file_ = nullptr; // Current cache file reference
|
|
|
|
CacheWriteBufferAllocator buffer_allocator_; // Buffer provider
|
|
|
|
ThreadedWriter writer_; // Writer threads
|
|
|
|
BlockCacheTierMetadata metadata_; // Cache meta data manager
|
|
|
|
std::atomic<uint64_t> size_{0}; // Size of the cache
|
2022-10-24 23:38:09 +00:00
|
|
|
Statistics stats_; // Statistics
|
2016-05-03 18:05:42 +00:00
|
|
|
};
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2017-02-27 20:59:34 +00:00
|
|
|
|