// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once
|
|
|
|
|
2019-06-13 22:39:52 +00:00
|
|
|
#include <atomic>
|
2019-08-09 20:09:04 +00:00
|
|
|
#include <fstream>
|
2019-06-13 22:39:52 +00:00
|
|
|
|
2019-06-06 18:21:11 +00:00
|
|
|
#include "monitoring/instrumented_mutex.h"
|
2022-10-21 19:15:35 +00:00
|
|
|
#include "rocksdb/block_cache_trace_writer.h"
|
2019-06-06 18:21:11 +00:00
|
|
|
#include "rocksdb/options.h"
|
2022-10-21 19:15:35 +00:00
|
|
|
#include "rocksdb/table_reader_caller.h"
|
2019-06-06 18:21:11 +00:00
|
|
|
#include "rocksdb/trace_reader_writer.h"
|
|
|
|
#include "trace_replay/trace_replay.h"
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2021-03-15 11:32:24 +00:00
|
|
|
class Env;
class SystemClock;

// Time-unit conversion constants shared by the tracer implementation
// (definitions live in the corresponding .cc file).
extern const uint64_t kMicrosInSecond;
extern const uint64_t kSecondInMinute;
extern const uint64_t kSecondInHour;

// Forward declaration; the full definition is provided by an included
// public header.
struct BlockCacheTraceRecord;
|
2019-07-01 22:11:43 +00:00
|
|
|
|
2019-07-04 01:45:36 +00:00
|
|
|
// Stateless helper routines for classifying and decoding block cache
// trace accesses.
class BlockCacheTraceHelper {
 public:
  // True if the access is a Get/MultiGet request touching a data block.
  static bool IsGetOrMultiGetOnDataBlock(TraceType block_type,
                                         TableReaderCaller caller);
  // True if the caller is a Get or MultiGet request.
  static bool IsGetOrMultiGet(TableReaderCaller caller);
  // True if the caller represents a user-initiated access (as opposed to a
  // background operation) — see the .cc for the exact caller set.
  static bool IsUserAccess(TableReaderCaller caller);
  // Row key is a concatenation of the access's fd_number and the referenced
  // user key.
  static std::string ComputeRowKey(const BlockCacheTraceRecord& access);
  // The first four bytes of the referenced key in a Get request is the table
  // id.
  static uint64_t GetTableId(const BlockCacheTraceRecord& access);
  // The sequence number of a get request is the last part of the referenced
  // key.
  static uint64_t GetSequenceNumber(const BlockCacheTraceRecord& access);
  // Block offset in a file is the last varint64 in the block key.
  static uint64_t GetBlockOffsetInFile(const BlockCacheTraceRecord& access);

  // Placeholder column family name used when the real name is unknown.
  static const std::string kUnknownColumnFamilyName;
  // Get id reserved for accesses that are not part of a Get/MultiGet.
  static const uint64_t kReservedGetId;
};
|
|
|
|
|
2019-06-10 22:30:05 +00:00
|
|
|
// Lookup context for tracing block cache accesses.
// We trace block accesses at five places:
// 1. BlockBasedTable::GetFilter
// 2. BlockBasedTable::GetUncompressedDict.
// 3. BlockBasedTable::MaybeReadAndLoadToCache. (To trace access on data, index,
// and range deletion block.)
// 4. BlockBasedTable::Get. (To trace the referenced key and whether the
// referenced key exists in a fetched data block.)
// 5. BlockBasedTable::MultiGet. (To trace the referenced key and whether the
// referenced key exists in a fetched data block.)
// The context is created at:
// 1. BlockBasedTable::Get. (kUserGet)
// 2. BlockBasedTable::MultiGet. (kUserMGet)
// 3. BlockBasedTable::NewIterator. (either kUserIterator, kCompaction, or
// external SST ingestion calls this function.)
// 4. BlockBasedTable::Open. (kPrefetch)
// 5. Index/Filter::CacheDependencies. (kPrefetch)
// 6. BlockBasedTable::ApproximateOffsetOf. (kCompaction or
// kUserApproximateSize).
struct BlockCacheLookupContext {
  BlockCacheLookupContext(const TableReaderCaller& _caller) : caller(_caller) {}
  BlockCacheLookupContext(const TableReaderCaller& _caller, uint64_t _get_id,
                          bool _get_from_user_specified_snapshot)
      : caller(_caller),
        get_id(_get_id),
        get_from_user_specified_snapshot(_get_from_user_specified_snapshot) {}
  const TableReaderCaller caller;
  // These are populated when we perform lookup/insert on block cache. The block
  // cache tracer uses this information when logging the block access at
  // BlockBasedTable::GET and BlockBasedTable::MultiGet.
  bool is_cache_hit = false;
  bool no_insert = false;
  TraceType block_type = TraceType::kTraceMax;
  uint64_t block_size = 0;
  std::string block_key;
  uint64_t num_keys_in_block = 0;
  // The unique id associated with Get and MultiGet. This enables us to track
  // how many blocks a Get/MultiGet request accesses. We can also measure the
  // impact of row cache vs block cache.
  uint64_t get_id = 0;
  std::string referenced_key;
  bool get_from_user_specified_snapshot = false;

  // Records the outcome of a block cache lookup/insert so it can later be
  // emitted as part of the trace record.
  void FillLookupContext(bool _is_cache_hit, bool _no_insert,
                         TraceType _block_type, uint64_t _block_size,
                         const std::string& _block_key,
                         uint64_t _num_keys_in_block) {
    is_cache_hit = _is_cache_hit;
    no_insert = _no_insert;
    block_type = _block_type;
    block_size = _block_size;
    block_key = _block_key;
    num_keys_in_block = _num_keys_in_block;
  }
};
|
|
|
|
|
2019-06-06 18:21:11 +00:00
|
|
|
// Metadata written at the head of every block cache trace file: when the
// trace started and which RocksDB version produced it.
struct BlockCacheTraceHeader {
  // Trace start time, as reported by the tracer's clock.
  uint64_t start_time;
  uint32_t rocksdb_major_version;
  uint32_t rocksdb_minor_version;
};
|
|
|
|
|
|
|
|
// BlockCacheTraceWriter captures all RocksDB block cache accesses using a
// user-provided TraceWriter. Every RocksDB operation is written as a single
// trace. Each trace will have a timestamp and type, followed by the trace
// payload.
class BlockCacheTraceWriterImpl : public BlockCacheTraceWriter {
 public:
  BlockCacheTraceWriterImpl(SystemClock* clock,
                            const BlockCacheTraceWriterOptions& trace_options,
                            std::unique_ptr<TraceWriter>&& trace_writer);
  ~BlockCacheTraceWriterImpl() = default;
  // No copy and move.
  BlockCacheTraceWriterImpl(const BlockCacheTraceWriterImpl&) = delete;
  BlockCacheTraceWriterImpl& operator=(const BlockCacheTraceWriterImpl&) =
      delete;
  BlockCacheTraceWriterImpl(BlockCacheTraceWriterImpl&&) = delete;
  BlockCacheTraceWriterImpl& operator=(BlockCacheTraceWriterImpl&&) = delete;

  // Serialize one access record into the underlying TraceWriter.
  // Pass Slice references to avoid copy.
  Status WriteBlockAccess(const BlockCacheTraceRecord& record,
                          const Slice& block_key, const Slice& cf_name,
                          const Slice& referenced_key);

  // Write a trace header at the beginning, typically on initiating a trace,
  // with some metadata like a magic number and RocksDB version.
  Status WriteHeader();

 private:
  // Not owned (raw pointer, never deleted here); must outlive this writer.
  SystemClock* clock_;
  BlockCacheTraceWriterOptions trace_options_;
  std::unique_ptr<TraceWriter> trace_writer_;
};
|
|
|
|
|
2019-08-09 20:09:04 +00:00
|
|
|
// Write a trace record in human readable format, see
// https://github.com/facebook/rocksdb/wiki/Block-cache-analysis-and-simulation-tools#trace-format
// for details.
class BlockCacheHumanReadableTraceWriter {
 public:
  ~BlockCacheHumanReadableTraceWriter();

  // Creates/opens the output file at the given path using `env`.
  Status NewWritableFile(const std::string& human_readable_trace_file_path,
                         ROCKSDB_NAMESPACE::Env* env);

  // Formats `access` (plus the caller-assigned block and get-key ids) as one
  // human-readable record and appends it to the output file.
  Status WriteHumanReadableTraceRecord(const BlockCacheTraceRecord& access,
                                       uint64_t block_id, uint64_t get_key_id);

 private:
  // Scratch buffer used to format a single record before writing it out.
  char trace_record_buffer_[1024 * 1024];
  std::unique_ptr<ROCKSDB_NAMESPACE::WritableFile>
      human_readable_trace_file_writer_;
};
|
|
|
|
|
2019-06-06 18:21:11 +00:00
|
|
|
// BlockCacheTraceReader helps read the trace file generated by
|
|
|
|
// BlockCacheTraceWriter using a user provided TraceReader.
|
|
|
|
class BlockCacheTraceReader {
|
|
|
|
public:
|
|
|
|
BlockCacheTraceReader(std::unique_ptr<TraceReader>&& reader);
|
2020-06-04 22:32:29 +00:00
|
|
|
virtual ~BlockCacheTraceReader() = default;
|
2019-06-06 18:21:11 +00:00
|
|
|
// No copy and move.
|
|
|
|
BlockCacheTraceReader(const BlockCacheTraceReader&) = delete;
|
|
|
|
BlockCacheTraceReader& operator=(const BlockCacheTraceReader&) = delete;
|
|
|
|
BlockCacheTraceReader(BlockCacheTraceReader&&) = delete;
|
|
|
|
BlockCacheTraceReader& operator=(BlockCacheTraceReader&&) = delete;
|
|
|
|
|
|
|
|
Status ReadHeader(BlockCacheTraceHeader* header);
|
|
|
|
|
|
|
|
Status ReadAccess(BlockCacheTraceRecord* record);
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::unique_ptr<TraceReader> trace_reader_;
|
|
|
|
};
|
|
|
|
|
2019-08-09 20:09:04 +00:00
|
|
|
// Read a trace record in human readable format, see
// https://github.com/facebook/rocksdb/wiki/Block-cache-analysis-and-simulation-tools#trace-format
// for details.
class BlockCacheHumanReadableTraceReader : public BlockCacheTraceReader {
 public:
  // Opens the human-readable trace file at `trace_file_path`.
  BlockCacheHumanReadableTraceReader(const std::string& trace_file_path);

  ~BlockCacheHumanReadableTraceReader();

  // NOTE(review): if the base-class ReadHeader/ReadAccess are non-virtual,
  // these redeclarations hide rather than override them, so calls through a
  // BlockCacheTraceReader pointer/reference will not reach this class —
  // verify that callers always use this derived type directly.
  Status ReadHeader(BlockCacheTraceHeader* header);

  // Parses the next human-readable line into a BlockCacheTraceRecord.
  Status ReadAccess(BlockCacheTraceRecord* record);

 private:
  std::ifstream human_readable_trace_reader_;
};
|
|
|
|
|
2019-06-13 22:39:52 +00:00
|
|
|
// A block cache tracer. It downsamples the accesses according to
// trace_options and uses BlockCacheTraceWriter to write the access record to
// the trace file.
class BlockCacheTracer {
 public:
  BlockCacheTracer();
  ~BlockCacheTracer();
  // No copy and move.
  BlockCacheTracer(const BlockCacheTracer&) = delete;
  BlockCacheTracer& operator=(const BlockCacheTracer&) = delete;
  BlockCacheTracer(BlockCacheTracer&&) = delete;
  BlockCacheTracer& operator=(BlockCacheTracer&&) = delete;

  // Start writing block cache accesses to the trace_writer.
  Status StartTrace(const BlockCacheTraceOptions& trace_options,
                    std::unique_ptr<BlockCacheTraceWriter>&& trace_writer);

  // Stop writing block cache accesses to the trace_writer.
  void EndTrace();

  // Cheap check intended for hot paths: a relaxed atomic load of the writer
  // pointer, so it may briefly lag a concurrent StartTrace/EndTrace.
  bool is_tracing_enabled() const {
    return writer_.load(std::memory_order_relaxed);
  }

  // Forwards one access record to the active writer (if tracing is enabled).
  Status WriteBlockAccess(const BlockCacheTraceRecord& record,
                          const Slice& block_key, const Slice& cf_name,
                          const Slice& referenced_key);

  // GetId cycles from 1 to std::numeric_limits<uint64_t>::max().
  uint64_t NextGetId();

 private:
  BlockCacheTraceOptions trace_options_;
  // A mutex protects the writer_.
  InstrumentedMutex trace_writer_mutex_;
  std::atomic<BlockCacheTraceWriter*> writer_;
  std::atomic<uint64_t> get_id_counter_;
};
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|