Sync tickers and histograms across C++ and Java (#12355)

Summary:
The RocksDB ticker and histogram statistics were out of sync between the C++ and Java code, with a number of newer stats missing from TickerType.java and HistogramType.java. There were also gaps in the numbering in portal.h, which could soon become an issue given the number of tickers and the fact that we're limited to 1-byte values in Java. This PR adds the missing stats and renumbers all of them. It also moves some stats around to group related stats together. Since this will go into a major release, compatibility shouldn't be an issue.
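
For context, the Java side mirrors the C++ Tickers/Histograms enums with hand-maintained byte values, which is where the 1-byte limit comes from. The following is only an illustrative sketch of that pattern (the constant names are real tickers, but the byte values here are made up); the authoritative values live in TickerType.java and portal.h.

```java
// Illustrative sketch of the TickerType pattern, not the actual file:
// each constant carries a one-byte value that portal.h maps back to the
// corresponding C++ Tickers entry, so every value must fit in a byte and
// must stay in sync with the C++ enum.
public enum TickerTypeSketch {
  BLOCK_CACHE_MISS((byte) 0x0),
  BLOCK_CACHE_HIT((byte) 0x1),
  BYTES_WRITTEN((byte) 0x3C); // hypothetical value, for illustration only

  private final byte value;

  TickerTypeSketch(final byte value) {
    this.value = value;
  }

  public byte getValue() {
    return value;
  }
}
```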

This should be automated at some point, since the current process is somewhat error-prone.
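
One cheap form of automation (not part of this PR; the check below is a hypothetical sketch assuming TickerType#getValue() and the TICKER_ENUM_MAX sentinel behave as in current RocksJava) would be a test that at least flags duplicate byte values on the Java side, one of the mistakes a manual renumbering can introduce. A fuller check would also compare each value against the C++ enum via portal.h.

```java
import java.util.HashSet;
import java.util.Set;

import org.rocksdb.TickerType;

// Hypothetical sanity check: every TickerType constant (other than the
// TICKER_ENUM_MAX sentinel) should carry a distinct one-byte value.
public final class TickerTypeAudit {
  public static void main(final String[] args) {
    final Set<Byte> seen = new HashSet<>();
    for (final TickerType ticker : TickerType.values()) {
      if (ticker == TickerType.TICKER_ENUM_MAX) {
        continue; // sentinel, pinned for backwards compatibility
      }
      if (!seen.add(ticker.getValue())) {
        throw new IllegalStateException("Duplicate ticker value: " + ticker);
      }
    }
    System.out.println(seen.size() + " distinct ticker values, no duplicates");
  }
}
```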

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12355

Reviewed By: jaykorean

Differential Revision: D53825324

Pulled By: anand1976

fbshipit-source-id: 298c180872f4b9f1ee54b8bb22f4e280458e7e09
Author: anand76
Date: 2024-02-15 17:22:03 -08:00 (committed by Facebook GitHub Bot)
Parent: 12018136d8
Commit: 28c1c15c29
6 changed files with 1566 additions and 1326 deletions


@@ -3714,7 +3714,7 @@ int main(int argc, char** argv) {
StartPhase("statistics");
{
const uint32_t BYTES_WRITTEN_TICKER = 40;
const uint32_t BYTES_WRITTEN_TICKER = 60;
const uint32_t DB_WRITE_HIST = 1;
rocksdb_statistics_histogram_data_t* hist =


@@ -72,6 +72,42 @@ enum Tickers : uint32_t {
// # of bytes written into cache.
BLOCK_CACHE_BYTES_WRITE,
BLOCK_CACHE_COMPRESSION_DICT_MISS,
BLOCK_CACHE_COMPRESSION_DICT_HIT,
BLOCK_CACHE_COMPRESSION_DICT_ADD,
BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
// # of blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_ADD_REDUNDANT <= BLOCK_CACHE_ADD
BLOCK_CACHE_ADD_REDUNDANT,
// # of index blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_INDEX_ADD_REDUNDANT <= BLOCK_CACHE_INDEX_ADD
BLOCK_CACHE_INDEX_ADD_REDUNDANT,
// # of filter blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_FILTER_ADD_REDUNDANT <= BLOCK_CACHE_FILTER_ADD
BLOCK_CACHE_FILTER_ADD_REDUNDANT,
// # of data blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_DATA_ADD_REDUNDANT <= BLOCK_CACHE_DATA_ADD
BLOCK_CACHE_DATA_ADD_REDUNDANT,
// # of dict blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT
// <= BLOCK_CACHE_COMPRESSION_DICT_ADD
BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT,
// Secondary cache statistics
SECONDARY_CACHE_HITS,
// Fine grained secondary cache stats
SECONDARY_CACHE_FILTER_HITS,
SECONDARY_CACHE_INDEX_HITS,
SECONDARY_CACHE_DATA_HITS,
// Compressed secondary cache related stats
COMPRESSED_SECONDARY_CACHE_DUMMY_HITS,
COMPRESSED_SECONDARY_CACHE_HITS,
COMPRESSED_SECONDARY_CACHE_PROMOTIONS,
COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS,
// # of times bloom filter has avoided file reads, i.e., negatives.
BLOOM_FILTER_USEFUL,
// # of times bloom FullFilter has not avoided the reads.
@@ -79,6 +115,16 @@ enum Tickers : uint32_t {
// # of times bloom FullFilter has not avoided the reads and data actually
// exist.
BLOOM_FILTER_FULL_TRUE_POSITIVE,
// Prefix filter stats when used for point lookups (Get / MultiGet).
// (For prefix filter stats on iterators, see *_LEVEL_SEEK_*.)
// Checked: filter was queried
BLOOM_FILTER_PREFIX_CHECKED,
// Useful: filter returned false so prevented accessing data+index blocks
BLOOM_FILTER_PREFIX_USEFUL,
// True positive: found a key matching the point query. When another key
// with the same prefix matches, it is considered a false positive by
// these statistics even though the filter returned a true positive.
BLOOM_FILTER_PREFIX_TRUE_POSITIVE,
// # persistent cache hit
PERSISTENT_CACHE_HIT,
@@ -142,6 +188,15 @@ enum Tickers : uint32_t {
// The number of uncompressed bytes read from an iterator.
// Includes size of key and value.
ITER_BYTES_READ,
// Number of internal keys skipped by Iterator
NUMBER_ITER_SKIP,
// Number of times we had to reseek inside an iteration to skip
// over large number of keys with same userkey.
NUMBER_OF_RESEEKS_IN_ITERATION,
NO_ITERATOR_CREATED, // number of iterators created
NO_ITERATOR_DELETED, // number of iterators deleted
NO_FILE_OPENS,
NO_FILE_ERRORS,
// Writer has to wait for compaction or flush to finish.
@@ -154,24 +209,13 @@ enum Tickers : uint32_t {
NUMBER_MULTIGET_CALLS,
NUMBER_MULTIGET_KEYS_READ,
NUMBER_MULTIGET_BYTES_READ,
// Number of keys actually found in MultiGet calls (vs number requested by
// caller)
// NUMBER_MULTIGET_KEYS_READ gives the number requested by caller
NUMBER_MULTIGET_KEYS_FOUND,
NUMBER_MERGE_FAILURES,
// Prefix filter stats when used for point lookups (Get / MultiGet).
// (For prefix filter stats on iterators, see *_LEVEL_SEEK_*.)
// Checked: filter was queried
BLOOM_FILTER_PREFIX_CHECKED,
// Useful: filter returned false so prevented accessing data+index blocks
BLOOM_FILTER_PREFIX_USEFUL,
// True positive: found a key matching the point query. When another key
// with the same prefix matches, it is considered a false positive by
// these statistics even though the filter returned a true positive.
BLOOM_FILTER_PREFIX_TRUE_POSITIVE,
// Number of times we had to reseek inside an iteration to skip
// over large number of keys with same userkey.
NUMBER_OF_RESEEKS_IN_ITERATION,
// Record the number of calls to GetUpdatesSince. Useful to keep track of
// transaction log iterator refreshes
GET_UPDATES_SINCE_CALLS,
@@ -206,8 +250,35 @@ enum Tickers : uint32_t {
NUMBER_BLOCK_COMPRESSED,
NUMBER_BLOCK_DECOMPRESSED,
// DEPRECATED / unused (see NUMBER_BLOCK_COMPRESSION_*)
NUMBER_BLOCK_NOT_COMPRESSED,
// Number of input bytes (uncompressed) to compression for SST blocks that
// are stored compressed.
BYTES_COMPRESSED_FROM,
// Number of output bytes (compressed) from compression for SST blocks that
// are stored compressed.
BYTES_COMPRESSED_TO,
// Number of uncompressed bytes for SST blocks that are stored uncompressed
// because compression type is kNoCompression, or some error case caused
// compression not to run or produce an output. Index blocks are only counted
// if enable_index_compression is true.
BYTES_COMPRESSION_BYPASSED,
// Number of input bytes (uncompressed) to compression for SST blocks that
// are stored uncompressed because the compression result was rejected,
// either because the ratio was not acceptable (see
// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
// `verify_compression` option.
BYTES_COMPRESSION_REJECTED,
// Like BYTES_COMPRESSION_BYPASSED but counting number of blocks
NUMBER_BLOCK_COMPRESSION_BYPASSED,
// Like BYTES_COMPRESSION_REJECTED but counting number of blocks
NUMBER_BLOCK_COMPRESSION_REJECTED,
// Number of input bytes (compressed) to decompression in reading compressed
// SST blocks from storage.
BYTES_DECOMPRESSED_FROM,
// Number of output bytes (uncompressed) from decompression in reading
// compressed SST blocks from storage.
BYTES_DECOMPRESSED_TO,
// Tickers that record cumulative time.
MERGE_OPERATION_TOTAL_TIME,
@@ -229,9 +300,6 @@ enum Tickers : uint32_t {
// Number of refill intervals where rate limiter's bytes are fully consumed.
NUMBER_RATE_LIMITER_DRAINS,
// Number of internal keys skipped by Iterator
NUMBER_ITER_SKIP,
// BlobDB specific stats
// # of Put/PutTTL/PutUntil to BlobDB. Only applicable to legacy BlobDB.
BLOB_DB_NUM_PUT,
@@ -310,6 +378,20 @@ enum Tickers : uint32_t {
// applicable to legacy BlobDB.
BLOB_DB_FIFO_BYTES_EVICTED,
// Integrated BlobDB specific stats
// # of times cache miss when accessing blob from blob cache.
BLOB_DB_CACHE_MISS,
// # of times cache hit when accessing blob from blob cache.
BLOB_DB_CACHE_HIT,
// # of data blocks added to blob cache.
BLOB_DB_CACHE_ADD,
// # of failures when adding blobs to blob cache.
BLOB_DB_CACHE_ADD_FAILURES,
// # of bytes read from blob cache.
BLOB_DB_CACHE_BYTES_READ,
// # of bytes written into blob cache.
BLOB_DB_CACHE_BYTES_WRITE,
// These counters indicate a performance issue in WritePrepared transactions.
// We should not see them ticking much.
// # of times prepare_mutex_ is acquired in the fast path.
@@ -323,36 +405,6 @@ enum Tickers : uint32_t {
// # of times ::Get returned TryAgain due to expired snapshot seq
TXN_GET_TRY_AGAIN,
// Number of keys actually found in MultiGet calls (vs number requested by
// caller)
// NUMBER_MULTIGET_KEYS_READ gives the number requested by caller
NUMBER_MULTIGET_KEYS_FOUND,
NO_ITERATOR_CREATED, // number of iterators created
NO_ITERATOR_DELETED, // number of iterators deleted
BLOCK_CACHE_COMPRESSION_DICT_MISS,
BLOCK_CACHE_COMPRESSION_DICT_HIT,
BLOCK_CACHE_COMPRESSION_DICT_ADD,
BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
// # of blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_ADD_REDUNDANT <= BLOCK_CACHE_ADD
BLOCK_CACHE_ADD_REDUNDANT,
// # of index blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_INDEX_ADD_REDUNDANT <= BLOCK_CACHE_INDEX_ADD
BLOCK_CACHE_INDEX_ADD_REDUNDANT,
// # of filter blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_FILTER_ADD_REDUNDANT <= BLOCK_CACHE_FILTER_ADD
BLOCK_CACHE_FILTER_ADD_REDUNDANT,
// # of data blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_DATA_ADD_REDUNDANT <= BLOCK_CACHE_DATA_ADD
BLOCK_CACHE_DATA_ADD_REDUNDANT,
// # of dict blocks redundantly inserted into block cache.
// REQUIRES: BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT
// <= BLOCK_CACHE_COMPRESSION_DICT_ADD
BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT,
// # of files marked as trash by sst file manager and will be deleted
// later by background thread.
FILES_MARKED_TRASH,
@@ -377,9 +429,6 @@ enum Tickers : uint32_t {
// Outdated bytes of data present on memtable at flush time.
MEMTABLE_GARBAGE_BYTES_AT_FLUSH,
// Secondary cache statistics
SECONDARY_CACHE_HITS,
// Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
VERIFY_CHECKSUM_READ_BYTES,
@@ -440,30 +489,11 @@ enum Tickers : uint32_t {
MULTIGET_COROUTINE_COUNT,
// Integrated BlobDB specific stats
// # of times cache miss when accessing blob from blob cache.
BLOB_DB_CACHE_MISS,
// # of times cache hit when accessing blob from blob cache.
BLOB_DB_CACHE_HIT,
// # of data blocks added to blob cache.
BLOB_DB_CACHE_ADD,
// # of failures when adding blobs to blob cache.
BLOB_DB_CACHE_ADD_FAILURES,
// # of bytes read from blob cache.
BLOB_DB_CACHE_BYTES_READ,
// # of bytes written into blob cache.
BLOB_DB_CACHE_BYTES_WRITE,
// Time spent in the ReadAsync file system call
READ_ASYNC_MICROS,
// Number of errors returned to the async read callback
ASYNC_READ_ERROR_COUNT,
// Fine grained secondary cache stats
SECONDARY_CACHE_FILTER_HITS,
SECONDARY_CACHE_INDEX_HITS,
SECONDARY_CACHE_DATA_HITS,
// Number of lookup into the prefetched tail (see
// `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`)
// that can't find its data for table open
@@ -479,36 +509,6 @@ enum Tickers : uint32_t {
// # of times timestamps can successfully help skip the table access
TIMESTAMP_FILTER_TABLE_FILTERED,
// Number of input bytes (uncompressed) to compression for SST blocks that
// are stored compressed.
BYTES_COMPRESSED_FROM,
// Number of output bytes (compressed) from compression for SST blocks that
// are stored compressed.
BYTES_COMPRESSED_TO,
// Number of uncompressed bytes for SST blocks that are stored uncompressed
// because compression type is kNoCompression, or some error case caused
// compression not to run or produce an output. Index blocks are only counted
// if enable_index_compression is true.
BYTES_COMPRESSION_BYPASSED,
// Number of input bytes (uncompressed) to compression for SST blocks that
// are stored uncompressed because the compression result was rejected,
// either because the ratio was not acceptable (see
// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
// `verify_compression` option.
BYTES_COMPRESSION_REJECTED,
// Like BYTES_COMPRESSION_BYPASSED but counting number of blocks
NUMBER_BLOCK_COMPRESSION_BYPASSED,
// Like BYTES_COMPRESSION_REJECTED but counting number of blocks
NUMBER_BLOCK_COMPRESSION_REJECTED,
// Number of input bytes (compressed) to decompression in reading compressed
// SST blocks from storage.
BYTES_DECOMPRESSED_FROM,
// Number of output bytes (uncompressed) from decompression in reading
// compressed SST blocks from storage.
BYTES_DECOMPRESSED_TO,
// Number of times readahead is trimmed during scans when
// ReadOptions.auto_readahead_size is set.
READAHEAD_TRIMMED,
@@ -526,12 +526,6 @@ enum Tickers : uint32_t {
// Number of FS reads avoided due to scan prefetching
PREFETCH_HITS,
// Compressed secondary cache related stats
COMPRESSED_SECONDARY_CACHE_DUMMY_HITS,
COMPRESSED_SECONDARY_CACHE_HITS,
COMPRESSED_SECONDARY_CACHE_PROMOTIONS,
COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS,
TICKER_ENUM_MAX
};
@@ -599,8 +593,6 @@ enum Histograms : uint32_t {
BYTES_PER_WRITE,
BYTES_PER_MULTIGET,
BYTES_COMPRESSED, // DEPRECATED / unused (see BYTES_COMPRESSED_{FROM,TO})
BYTES_DECOMPRESSED, // DEPRECATED / unused (see BYTES_DECOMPRESSED_{FROM,TO})
COMPRESSION_TIMES_NANOS,
DECOMPRESSION_TIMES_NANOS,
// Number of merge operands passed to the merge operator in user read
@@ -640,11 +632,15 @@ enum Histograms : uint32_t {
FLUSH_TIME,
SST_BATCH_SIZE,
// Number of IOs issued in parallel in a MultiGet batch
MULTIGET_IO_BATCH_SIZE,
// MultiGet stats logged per level
// Num of index and filter blocks read from file system per level.
NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL,
// Num of sst files read from file system per level.
NUM_SST_READ_PER_LEVEL,
// Number of levels requiring IO for MultiGet
NUM_LEVEL_READ_PER_MULTIGET,
// Error handler statistics
ERROR_HANDLER_AUTORESUME_RETRY_COUNT,
@@ -656,12 +652,6 @@ enum Histograms : uint32_t {
// Number of prefetched bytes discarded by RocksDB.
PREFETCHED_BYTES_DISCARDED,
// Number of IOs issued in parallel in a MultiGet batch
MULTIGET_IO_BATCH_SIZE,
// Number of levels requiring IO for MultiGet
NUM_LEVEL_READ_PER_MULTIGET,
// Wait time for aborting async read in FilePrefetchBuffer destructor
ASYNC_PREFETCH_ABORT_MICROS,

File diff suppressed because it is too large.


@@ -13,188 +13,205 @@ public enum HistogramType {
COMPACTION_TIME((byte) 0x2),
SUBCOMPACTION_SETUP_TIME((byte) 0x3),
COMPACTION_CPU_TIME((byte) 0x3),
TABLE_SYNC_MICROS((byte) 0x4),
SUBCOMPACTION_SETUP_TIME((byte) 0x4),
COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5),
TABLE_SYNC_MICROS((byte) 0x5),
WAL_FILE_SYNC_MICROS((byte) 0x6),
COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x6),
MANIFEST_FILE_SYNC_MICROS((byte) 0x7),
WAL_FILE_SYNC_MICROS((byte) 0x7),
MANIFEST_FILE_SYNC_MICROS((byte) 0x8),
/**
* TIME SPENT IN IO DURING TABLE OPEN.
*/
TABLE_OPEN_IO_MICROS((byte) 0x8),
TABLE_OPEN_IO_MICROS((byte) 0x9),
DB_MULTIGET((byte) 0x9),
DB_MULTIGET((byte) 0xA),
READ_BLOCK_COMPACTION_MICROS((byte) 0xA),
READ_BLOCK_COMPACTION_MICROS((byte) 0xB),
READ_BLOCK_GET_MICROS((byte) 0xB),
READ_BLOCK_GET_MICROS((byte) 0xC),
WRITE_RAW_BLOCK_MICROS((byte) 0xC),
WRITE_RAW_BLOCK_MICROS((byte) 0xD),
NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12),
NUM_FILES_IN_SINGLE_COMPACTION((byte) 0xE),
DB_SEEK((byte) 0x13),
DB_SEEK((byte) 0xF),
WRITE_STALL((byte) 0x14),
WRITE_STALL((byte) 0x10),
SST_READ_MICROS((byte) 0x15),
SST_READ_MICROS((byte) 0x11),
FILE_READ_FLUSH_MICROS((byte) 0x12),
FILE_READ_COMPACTION_MICROS((byte) 0x13),
FILE_READ_DB_OPEN_MICROS((byte) 0x14),
FILE_READ_GET_MICROS((byte) 0x15),
FILE_READ_MULTIGET_MICROS((byte) 0x16),
FILE_READ_DB_ITERATOR_MICROS((byte) 0x17),
FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x18),
FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x19),
SST_WRITE_MICROS((byte) 0x1A),
FILE_WRITE_FLUSH_MICROS((byte) 0x1B),
FILE_WRITE_COMPACTION_MICROS((byte) 0x1C),
FILE_WRITE_DB_OPEN_MICROS((byte) 0x1D),
/**
* The number of subcompactions actually scheduled during a compaction.
*/
NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16),
NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x1E),
/**
* Value size distribution in each operation.
*/
BYTES_PER_READ((byte) 0x17),
BYTES_PER_WRITE((byte) 0x18),
BYTES_PER_MULTIGET((byte) 0x19),
BYTES_PER_READ((byte) 0x1F),
BYTES_PER_WRITE((byte) 0x20),
BYTES_PER_MULTIGET((byte) 0x21),
/**
* number of bytes compressed.
*/
BYTES_COMPRESSED((byte) 0x1A),
COMPRESSION_TIMES_NANOS((byte) 0x22),
/**
* number of bytes decompressed.
* <p>
* number of bytes is when uncompressed; i.e. before/after respectively
*/
BYTES_DECOMPRESSED((byte) 0x1B),
DECOMPRESSION_TIMES_NANOS((byte) 0x23),
COMPRESSION_TIMES_NANOS((byte) 0x1C),
DECOMPRESSION_TIMES_NANOS((byte) 0x1D),
READ_NUM_MERGE_OPERANDS((byte) 0x1E),
/**
* Time spent flushing memtable to disk.
*/
FLUSH_TIME((byte) 0x20),
READ_NUM_MERGE_OPERANDS((byte) 0x24),
/**
* Size of keys written to BlobDB.
*/
BLOB_DB_KEY_SIZE((byte) 0x21),
BLOB_DB_KEY_SIZE((byte) 0x25),
/**
* Size of values written to BlobDB.
*/
BLOB_DB_VALUE_SIZE((byte) 0x22),
BLOB_DB_VALUE_SIZE((byte) 0x26),
/**
* BlobDB Put/PutWithTTL/PutUntil/Write latency.
*/
BLOB_DB_WRITE_MICROS((byte) 0x23),
BLOB_DB_WRITE_MICROS((byte) 0x27),
/**
* BlobDB Get latency.
*/
BLOB_DB_GET_MICROS((byte) 0x24),
BLOB_DB_GET_MICROS((byte) 0x28),
/**
* BlobDB MultiGet latency.
*/
BLOB_DB_MULTIGET_MICROS((byte) 0x25),
BLOB_DB_MULTIGET_MICROS((byte) 0x29),
/**
* BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency.
*/
BLOB_DB_SEEK_MICROS((byte) 0x26),
BLOB_DB_SEEK_MICROS((byte) 0x2A),
/**
* BlobDB Next latency.
*/
BLOB_DB_NEXT_MICROS((byte) 0x27),
BLOB_DB_NEXT_MICROS((byte) 0x2B),
/**
* BlobDB Prev latency.
*/
BLOB_DB_PREV_MICROS((byte) 0x28),
BLOB_DB_PREV_MICROS((byte) 0x2C),
/**
* Blob file write latency.
*/
BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x29),
BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x2D),
/**
* Blob file read latency.
*/
BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2A),
BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2E),
/**
* Blob file sync latency.
*/
BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2B),
BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2F),
/**
* BlobDB compression time.
*/
BLOB_DB_COMPRESSION_MICROS((byte) 0x2D),
BLOB_DB_COMPRESSION_MICROS((byte) 0x30),
/**
* BlobDB decompression time.
*/
BLOB_DB_DECOMPRESSION_MICROS((byte) 0x2E),
BLOB_DB_DECOMPRESSION_MICROS((byte) 0x31),
/**
* Time spent flushing memtable to disk.
*/
FLUSH_TIME((byte) 0x32),
/**
* Number of MultiGet batch keys overlapping a file
*/
SST_BATCH_SIZE((byte) 0x33),
/**
* Size of a single IO batch issued by MultiGet
*/
MULTIGET_IO_BATCH_SIZE((byte) 0x34),
/**
* Num of Index and Filter blocks read from file system per level in MultiGet
* request
*/
NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x2F),
NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x35),
/**
* Num of SST files read from file system per level in MultiGet request.
*/
NUM_SST_READ_PER_LEVEL((byte) 0x31),
NUM_SST_READ_PER_LEVEL((byte) 0x36),
/**
* Num of LSM levels read from file system per MultiGet request.
*/
NUM_LEVEL_READ_PER_MULTIGET((byte) 0x37),
/**
* The number of retry in auto resume
*/
ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x32),
ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x38),
ASYNC_READ_BYTES((byte) 0x33),
ASYNC_READ_BYTES((byte) 0x39),
POLL_WAIT_MICROS((byte) 0x3A),
/**
* Number of prefetched bytes discarded by RocksDB.
*/
PREFETCHED_BYTES_DISCARDED((byte) 0x3B),
/**
* Wait time for aborting async read in FilePrefetchBuffer destructor
*/
ASYNC_PREFETCH_ABORT_MICROS((byte) 0x3C),
/**
* Number of bytes read for RocksDB's prefetching contents
* (as opposed to file system's prefetch)
* from the end of SST table during block based table open
*/
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x39),
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D),
FILE_READ_FLUSH_MICROS((byte) 0x3A),
FILE_READ_COMPACTION_MICROS((byte) 0x3B),
FILE_READ_DB_OPEN_MICROS((byte) 0x3C),
FILE_READ_GET_MICROS((byte) 0x3D),
FILE_READ_MULTIGET_MICROS((byte) 0x3E),
FILE_READ_DB_ITERATOR_MICROS((byte) 0x3F),
FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x40),
FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x41),
SST_WRITE_MICROS((byte) 0x42),
FILE_WRITE_FLUSH_MICROS((byte) 0x43),
FILE_WRITE_COMPACTION_MICROS((byte) 0x44),
FILE_WRITE_DB_OPEN_MICROS((byte) 0x45),
// 0x1F for backwards compatibility on current minor version.
HISTOGRAM_ENUM_MAX((byte) 0x1F);
// 0x3E for backwards compatibility on current minor version.
HISTOGRAM_ENUM_MAX((byte) 0x3E);
private final byte value;

File diff suppressed because it is too large.


@@ -39,10 +39,42 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{BLOCK_CACHE_DATA_BYTES_INSERT, "rocksdb.block.cache.data.bytes.insert"},
{BLOCK_CACHE_BYTES_READ, "rocksdb.block.cache.bytes.read"},
{BLOCK_CACHE_BYTES_WRITE, "rocksdb.block.cache.bytes.write"},
{BLOCK_CACHE_COMPRESSION_DICT_MISS,
"rocksdb.block.cache.compression.dict.miss"},
{BLOCK_CACHE_COMPRESSION_DICT_HIT,
"rocksdb.block.cache.compression.dict.hit"},
{BLOCK_CACHE_COMPRESSION_DICT_ADD,
"rocksdb.block.cache.compression.dict.add"},
{BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
"rocksdb.block.cache.compression.dict.bytes.insert"},
{BLOCK_CACHE_ADD_REDUNDANT, "rocksdb.block.cache.add.redundant"},
{BLOCK_CACHE_INDEX_ADD_REDUNDANT,
"rocksdb.block.cache.index.add.redundant"},
{BLOCK_CACHE_FILTER_ADD_REDUNDANT,
"rocksdb.block.cache.filter.add.redundant"},
{BLOCK_CACHE_DATA_ADD_REDUNDANT, "rocksdb.block.cache.data.add.redundant"},
{BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT,
"rocksdb.block.cache.compression.dict.add.redundant"},
{SECONDARY_CACHE_HITS, "rocksdb.secondary.cache.hits"},
{SECONDARY_CACHE_FILTER_HITS, "rocksdb.secondary.cache.filter.hits"},
{SECONDARY_CACHE_INDEX_HITS, "rocksdb.secondary.cache.index.hits"},
{SECONDARY_CACHE_DATA_HITS, "rocksdb.secondary.cache.data.hits"},
{COMPRESSED_SECONDARY_CACHE_DUMMY_HITS,
"rocksdb.compressed.secondary.cache.dummy.hits"},
{COMPRESSED_SECONDARY_CACHE_HITS,
"rocksdb.compressed.secondary.cache.hits"},
{COMPRESSED_SECONDARY_CACHE_PROMOTIONS,
"rocksdb.compressed.secondary.cache.promotions"},
{COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS,
"rocksdb.compressed.secondary.cache.promotion.skips"},
{BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful"},
{BLOOM_FILTER_FULL_POSITIVE, "rocksdb.bloom.filter.full.positive"},
{BLOOM_FILTER_FULL_TRUE_POSITIVE,
"rocksdb.bloom.filter.full.true.positive"},
{BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked"},
{BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful"},
{BLOOM_FILTER_PREFIX_TRUE_POSITIVE,
"rocksdb.bloom.filter.prefix.true.positive"},
{PERSISTENT_CACHE_HIT, "rocksdb.persistent.cache.hit"},
{PERSISTENT_CACHE_MISS, "rocksdb.persistent.cache.miss"},
{SIM_BLOCK_CACHE_HIT, "rocksdb.sim.block.cache.hit"},
@@ -73,6 +105,10 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{NUMBER_DB_NEXT_FOUND, "rocksdb.number.db.next.found"},
{NUMBER_DB_PREV_FOUND, "rocksdb.number.db.prev.found"},
{ITER_BYTES_READ, "rocksdb.db.iter.bytes.read"},
{NUMBER_ITER_SKIP, "rocksdb.number.iter.skip"},
{NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration"},
{NO_ITERATOR_CREATED, "rocksdb.num.iterator.created"},
{NO_ITERATOR_DELETED, "rocksdb.num.iterator.deleted"},
{NO_FILE_OPENS, "rocksdb.no.file.opens"},
{NO_FILE_ERRORS, "rocksdb.no.file.errors"},
{STALL_MICROS, "rocksdb.stall.micros"},
@@ -80,12 +116,8 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{NUMBER_MULTIGET_CALLS, "rocksdb.number.multiget.get"},
{NUMBER_MULTIGET_KEYS_READ, "rocksdb.number.multiget.keys.read"},
{NUMBER_MULTIGET_BYTES_READ, "rocksdb.number.multiget.bytes.read"},
{NUMBER_MULTIGET_KEYS_FOUND, "rocksdb.number.multiget.keys.found"},
{NUMBER_MERGE_FAILURES, "rocksdb.number.merge.failures"},
{BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked"},
{BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful"},
{BLOOM_FILTER_PREFIX_TRUE_POSITIVE,
"rocksdb.bloom.filter.prefix.true.positive"},
{NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration"},
{GET_UPDATES_SINCE_CALLS, "rocksdb.getupdatessince.calls"},
{WAL_FILE_SYNCED, "rocksdb.wal.synced"},
{WAL_FILE_BYTES, "rocksdb.wal.bytes"},
@@ -108,7 +140,16 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{NUMBER_SUPERVERSION_CLEANUPS, "rocksdb.number.superversion_cleanups"},
{NUMBER_BLOCK_COMPRESSED, "rocksdb.number.block.compressed"},
{NUMBER_BLOCK_DECOMPRESSED, "rocksdb.number.block.decompressed"},
{NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
{BYTES_COMPRESSED_FROM, "rocksdb.bytes.compressed.from"},
{BYTES_COMPRESSED_TO, "rocksdb.bytes.compressed.to"},
{BYTES_COMPRESSION_BYPASSED, "rocksdb.bytes.compression_bypassed"},
{BYTES_COMPRESSION_REJECTED, "rocksdb.bytes.compression.rejected"},
{NUMBER_BLOCK_COMPRESSION_BYPASSED,
"rocksdb.number.block_compression_bypassed"},
{NUMBER_BLOCK_COMPRESSION_REJECTED,
"rocksdb.number.block_compression_rejected"},
{BYTES_DECOMPRESSED_FROM, "rocksdb.bytes.decompressed.from"},
{BYTES_DECOMPRESSED_TO, "rocksdb.bytes.decompressed.to"},
{MERGE_OPERATION_TOTAL_TIME, "rocksdb.merge.operation.time.nanos"},
{FILTER_OPERATION_TOTAL_TIME, "rocksdb.filter.operation.time.nanos"},
{COMPACTION_CPU_TOTAL_TIME, "rocksdb.compaction.total.time.cpu_micros"},
@@ -117,7 +158,6 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{READ_AMP_ESTIMATE_USEFUL_BYTES, "rocksdb.read.amp.estimate.useful.bytes"},
{READ_AMP_TOTAL_READ_BYTES, "rocksdb.read.amp.total.read.bytes"},
{NUMBER_RATE_LIMITER_DRAINS, "rocksdb.number.rate_limiter.drains"},
{NUMBER_ITER_SKIP, "rocksdb.number.iter.skip"},
{BLOB_DB_NUM_PUT, "rocksdb.blobdb.num.put"},
{BLOB_DB_NUM_WRITE, "rocksdb.blobdb.num.write"},
{BLOB_DB_NUM_GET, "rocksdb.blobdb.num.get"},
@@ -150,31 +190,18 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{BLOB_DB_FIFO_NUM_FILES_EVICTED, "rocksdb.blobdb.fifo.num.files.evicted"},
{BLOB_DB_FIFO_NUM_KEYS_EVICTED, "rocksdb.blobdb.fifo.num.keys.evicted"},
{BLOB_DB_FIFO_BYTES_EVICTED, "rocksdb.blobdb.fifo.bytes.evicted"},
{BLOB_DB_CACHE_MISS, "rocksdb.blobdb.cache.miss"},
{BLOB_DB_CACHE_HIT, "rocksdb.blobdb.cache.hit"},
{BLOB_DB_CACHE_ADD, "rocksdb.blobdb.cache.add"},
{BLOB_DB_CACHE_ADD_FAILURES, "rocksdb.blobdb.cache.add.failures"},
{BLOB_DB_CACHE_BYTES_READ, "rocksdb.blobdb.cache.bytes.read"},
{BLOB_DB_CACHE_BYTES_WRITE, "rocksdb.blobdb.cache.bytes.write"},
{TXN_PREPARE_MUTEX_OVERHEAD, "rocksdb.txn.overhead.mutex.prepare"},
{TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD,
"rocksdb.txn.overhead.mutex.old.commit.map"},
{TXN_DUPLICATE_KEY_OVERHEAD, "rocksdb.txn.overhead.duplicate.key"},
{TXN_SNAPSHOT_MUTEX_OVERHEAD, "rocksdb.txn.overhead.mutex.snapshot"},
{TXN_GET_TRY_AGAIN, "rocksdb.txn.get.tryagain"},
{NUMBER_MULTIGET_KEYS_FOUND, "rocksdb.number.multiget.keys.found"},
{NO_ITERATOR_CREATED, "rocksdb.num.iterator.created"},
{NO_ITERATOR_DELETED, "rocksdb.num.iterator.deleted"},
{BLOCK_CACHE_COMPRESSION_DICT_MISS,
"rocksdb.block.cache.compression.dict.miss"},
{BLOCK_CACHE_COMPRESSION_DICT_HIT,
"rocksdb.block.cache.compression.dict.hit"},
{BLOCK_CACHE_COMPRESSION_DICT_ADD,
"rocksdb.block.cache.compression.dict.add"},
{BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
"rocksdb.block.cache.compression.dict.bytes.insert"},
{BLOCK_CACHE_ADD_REDUNDANT, "rocksdb.block.cache.add.redundant"},
{BLOCK_CACHE_INDEX_ADD_REDUNDANT,
"rocksdb.block.cache.index.add.redundant"},
{BLOCK_CACHE_FILTER_ADD_REDUNDANT,
"rocksdb.block.cache.filter.add.redundant"},
{BLOCK_CACHE_DATA_ADD_REDUNDANT, "rocksdb.block.cache.data.add.redundant"},
{BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT,
"rocksdb.block.cache.compression.dict.add.redundant"},
{FILES_MARKED_TRASH, "rocksdb.files.marked.trash"},
{FILES_DELETED_FROM_TRASH_QUEUE, "rocksdb.files.marked.trash.deleted"},
{FILES_DELETED_IMMEDIATELY, "rocksdb.files.deleted.immediately"},
@@ -192,7 +219,6 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
"rocksdb.memtable.payload.bytes.at.flush"},
{MEMTABLE_GARBAGE_BYTES_AT_FLUSH,
"rocksdb.memtable.garbage.bytes.at.flush"},
{SECONDARY_CACHE_HITS, "rocksdb.secondary.cache.hits"},
{VERIFY_CHECKSUM_READ_BYTES, "rocksdb.verify_checksum.read.bytes"},
{BACKUP_READ_BYTES, "rocksdb.backup.read.bytes"},
{BACKUP_WRITE_BYTES, "rocksdb.backup.write.bytes"},
@@ -226,46 +252,19 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{BLOCK_CHECKSUM_COMPUTE_COUNT, "rocksdb.block.checksum.compute.count"},
{BLOCK_CHECKSUM_MISMATCH_COUNT, "rocksdb.block.checksum.mismatch.count"},
{MULTIGET_COROUTINE_COUNT, "rocksdb.multiget.coroutine.count"},
{BLOB_DB_CACHE_MISS, "rocksdb.blobdb.cache.miss"},
{BLOB_DB_CACHE_HIT, "rocksdb.blobdb.cache.hit"},
{BLOB_DB_CACHE_ADD, "rocksdb.blobdb.cache.add"},
{BLOB_DB_CACHE_ADD_FAILURES, "rocksdb.blobdb.cache.add.failures"},
{BLOB_DB_CACHE_BYTES_READ, "rocksdb.blobdb.cache.bytes.read"},
{BLOB_DB_CACHE_BYTES_WRITE, "rocksdb.blobdb.cache.bytes.write"},
{READ_ASYNC_MICROS, "rocksdb.read.async.micros"},
{ASYNC_READ_ERROR_COUNT, "rocksdb.async.read.error.count"},
{SECONDARY_CACHE_FILTER_HITS, "rocksdb.secondary.cache.filter.hits"},
{SECONDARY_CACHE_INDEX_HITS, "rocksdb.secondary.cache.index.hits"},
{SECONDARY_CACHE_DATA_HITS, "rocksdb.secondary.cache.data.hits"},
{TABLE_OPEN_PREFETCH_TAIL_MISS, "rocksdb.table.open.prefetch.tail.miss"},
{TABLE_OPEN_PREFETCH_TAIL_HIT, "rocksdb.table.open.prefetch.tail.hit"},
{TIMESTAMP_FILTER_TABLE_CHECKED, "rocksdb.timestamp.filter.table.checked"},
{TIMESTAMP_FILTER_TABLE_FILTERED,
"rocksdb.timestamp.filter.table.filtered"},
{BYTES_COMPRESSED_FROM, "rocksdb.bytes.compressed.from"},
{BYTES_COMPRESSED_TO, "rocksdb.bytes.compressed.to"},
{BYTES_COMPRESSION_BYPASSED, "rocksdb.bytes.compression_bypassed"},
{BYTES_COMPRESSION_REJECTED, "rocksdb.bytes.compression.rejected"},
{NUMBER_BLOCK_COMPRESSION_BYPASSED,
"rocksdb.number.block_compression_bypassed"},
{NUMBER_BLOCK_COMPRESSION_REJECTED,
"rocksdb.number.block_compression_rejected"},
{BYTES_DECOMPRESSED_FROM, "rocksdb.bytes.decompressed.from"},
{BYTES_DECOMPRESSED_TO, "rocksdb.bytes.decompressed.to"},
{READAHEAD_TRIMMED, "rocksdb.readahead.trimmed"},
{FIFO_MAX_SIZE_COMPACTIONS, "rocksdb.fifo.max.size.compactions"},
{FIFO_TTL_COMPACTIONS, "rocksdb.fifo.ttl.compactions"},
{PREFETCH_BYTES, "rocksdb.prefetch.bytes"},
{PREFETCH_BYTES_USEFUL, "rocksdb.prefetch.bytes.useful"},
{PREFETCH_HITS, "rocksdb.prefetch.hits"},
{COMPRESSED_SECONDARY_CACHE_DUMMY_HITS,
"rocksdb.compressed.secondary.cache.dummy.hits"},
{COMPRESSED_SECONDARY_CACHE_HITS,
"rocksdb.compressed.secondary.cache.hits"},
{COMPRESSED_SECONDARY_CACHE_PROMOTIONS,
"rocksdb.compressed.secondary.cache.promotions"},
{COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS,
"rocksdb.compressed.secondary.cache.promotion.skips"},
};
const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
@@ -305,8 +304,6 @@ const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
{BYTES_PER_READ, "rocksdb.bytes.per.read"},
{BYTES_PER_WRITE, "rocksdb.bytes.per.write"},
{BYTES_PER_MULTIGET, "rocksdb.bytes.per.multiget"},
{BYTES_COMPRESSED, "rocksdb.bytes.compressed"},
{BYTES_DECOMPRESSED, "rocksdb.bytes.decompressed"},
{COMPRESSION_TIMES_NANOS, "rocksdb.compression.times.nanos"},
{DECOMPRESSION_TIMES_NANOS, "rocksdb.decompression.times.nanos"},
{READ_NUM_MERGE_OPERANDS, "rocksdb.read.num.merge_operands"},
@@ -325,16 +322,16 @@ const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
{BLOB_DB_DECOMPRESSION_MICROS, "rocksdb.blobdb.decompression.micros"},
{FLUSH_TIME, "rocksdb.db.flush.micros"},
{SST_BATCH_SIZE, "rocksdb.sst.batch.size"},
{MULTIGET_IO_BATCH_SIZE, "rocksdb.multiget.io.batch.size"},
{NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL,
"rocksdb.num.index.and.filter.blocks.read.per.level"},
{NUM_SST_READ_PER_LEVEL, "rocksdb.num.sst.read.per.level"},
{NUM_LEVEL_READ_PER_MULTIGET, "rocksdb.num.level.read.per.multiget"},
{ERROR_HANDLER_AUTORESUME_RETRY_COUNT,
"rocksdb.error.handler.autoresume.retry.count"},
{ASYNC_READ_BYTES, "rocksdb.async.read.bytes"},
{POLL_WAIT_MICROS, "rocksdb.poll.wait.micros"},
{PREFETCHED_BYTES_DISCARDED, "rocksdb.prefetched.bytes.discarded"},
{MULTIGET_IO_BATCH_SIZE, "rocksdb.multiget.io.batch.size"},
{NUM_LEVEL_READ_PER_MULTIGET, "rocksdb.num.level.read.per.multiget"},
{ASYNC_PREFETCH_ABORT_MICROS, "rocksdb.async.prefetch.abort.micros"},
{TABLE_OPEN_PREFETCH_TAIL_READ_BYTES,
"rocksdb.table.open.prefetch.tail.read.bytes"},