// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cstdlib>
#include <functional>
#include <memory>
#include "cache/cache_entry_roles.h"
#include "cache/lru_cache.h"
#include "db/column_family.h"
#include "db/db_test_util.h"
#include "port/stack_trace.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "util/compression.h"
#include "util/defer.h"
#include "util/random.h"
#include "utilities/fault_injection_fs.h"
namespace ROCKSDB_NAMESPACE {
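// Test fixture that records block cache ticker baselines so each test can
// assert exact hit/miss/insert/failure deltas against the DB statistics.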
class DBBlockCacheTest : public DBTestBase {
 private:
  size_t miss_count_ = 0;
  size_t hit_count_ = 0;
  size_t insert_count_ = 0;
  size_t failure_count_ = 0;
  size_t compression_dict_miss_count_ = 0;
  size_t compression_dict_hit_count_ = 0;
  size_t compression_dict_insert_count_ = 0;
  size_t compressed_miss_count_ = 0;
  size_t compressed_hit_count_ = 0;
  size_t compressed_insert_count_ = 0;
  size_t compressed_failure_count_ = 0;

 public:
  const size_t kNumBlocks = 10;
  const size_t kValueSize = 100;

  DBBlockCacheTest()
      : DBTestBase("db_block_cache_test", /*env_do_fsync=*/true) {}

  BlockBasedTableOptions GetTableOptions() {
    BlockBasedTableOptions table_options;
    // Set a small enough block size so that each key-value gets its own
    // block.
    table_options.block_size = 1;
    return table_options;
  }

  Options GetOptions(const BlockBasedTableOptions& table_options) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.avoid_flush_during_recovery = false;
    // options.compression = kNoCompression;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    return options;
  }

  void InitTable(const Options& /*options*/) {
    std::string value(kValueSize, 'a');
    for (size_t i = 0; i < kNumBlocks; i++) {
      ASSERT_OK(Put(ToString(i), value.c_str()));
    }
  }
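
  // Snapshot the current block cache ticker counts as the baseline for the
  // delta assertions in CheckCacheCounters() and
  // CheckCompressedCacheCounters() below.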
  void RecordCacheCounters(const Options& options) {
    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    compressed_miss_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    compressed_hit_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    compressed_insert_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    compressed_failure_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
  }
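
  // Same baseline snapshot, for the compression dictionary block tickers.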
  void RecordCacheCountersForCompressionDict(const Options& options) {
    compression_dict_miss_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
    compression_dict_hit_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_HIT);
    compression_dict_insert_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD);
  }
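
  // Assert that each block cache ticker advanced by exactly the expected
  // delta since the last snapshot, then roll the baseline forward.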
  void CheckCacheCounters(const Options& options, size_t expected_misses,
                          size_t expected_hits, size_t expected_inserts,
                          size_t expected_failures) {
    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
    miss_count_ = new_miss_count;
    hit_count_ = new_hit_count;
    insert_count_ = new_insert_count;
    failure_count_ = new_failure_count;
  }
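
  // Delta check for the compression dictionary tickers, mirroring
  // CheckCacheCounters().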
  void CheckCacheCountersForCompressionDict(
      const Options& options, size_t expected_compression_dict_misses,
      size_t expected_compression_dict_hits,
      size_t expected_compression_dict_inserts) {
    size_t new_compression_dict_miss_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
    size_t new_compression_dict_hit_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_HIT);
    size_t new_compression_dict_insert_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD);
    ASSERT_EQ(compression_dict_miss_count_ + expected_compression_dict_misses,
              new_compression_dict_miss_count);
    ASSERT_EQ(compression_dict_hit_count_ + expected_compression_dict_hits,
              new_compression_dict_hit_count);
    ASSERT_EQ(
        compression_dict_insert_count_ + expected_compression_dict_inserts,
        new_compression_dict_insert_count);
    compression_dict_miss_count_ = new_compression_dict_miss_count;
    compression_dict_hit_count_ = new_compression_dict_hit_count;
    compression_dict_insert_count_ = new_compression_dict_insert_count;
  }
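
  // Delta check for the compressed block cache tickers.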
  void CheckCompressedCacheCounters(const Options& options,
                                    size_t expected_misses,
                                    size_t expected_hits,
                                    size_t expected_inserts,
                                    size_t expected_failures) {
    size_t new_miss_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    size_t new_hit_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    size_t new_insert_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
    ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(compressed_failure_count_ + expected_failures,
              new_failure_count);
    compressed_miss_count_ = new_miss_count;
    compressed_hit_count_ = new_hit_count;
    compressed_insert_count_ = new_insert_count;
    compressed_failure_count_ = new_failure_count;
  }
#ifndef ROCKSDB_LITE
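  // Read the per-role block cache entry counts via the background
  // (non-foreground) stats collection path.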
  const std::array<size_t, kNumCacheEntryRoles> GetCacheEntryRoleCountsBg() {
    // Verify in cache entry role stats
    ColumnFamilyHandleImpl* cfh =
        static_cast<ColumnFamilyHandleImpl*>(dbfull()->DefaultColumnFamily());
    InternalStats* internal_stats_ptr = cfh->cfd()->internal_stats();
    InternalStats::CacheEntryRoleStats stats;
    internal_stats_ptr->TEST_GetCacheEntryRoleStats(&stats,
                                                    /*foreground=*/false);
    return stats.entry_counts;
  }
#endif // ROCKSDB_LITE
};
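// Block cache usage should rise while an iterator pins a block and drop back
// to zero once the iterator is deleted.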
TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);
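
  // Zero-capacity, non-strict cache: inserts still succeed while entries are
  // externally referenced, so GetUsage() tracks exactly the blocks pinned by
  // the iterator below.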
  LRUCacheOptions co;
  co.capacity = 0;
  co.num_shard_bits = 0;
  co.strict_capacity_limit = false;
  // Needed so the entry stats collector is not counted in cache usage
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  table_options.block_cache = cache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

  ASSERT_EQ(0, cache->GetUsage());
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(0));
  ASSERT_LT(0, cache->GetUsage());
  delete iter;
  iter = nullptr;
  ASSERT_EQ(0, cache->GetUsage());
}
TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
  ReadOptions read_options;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);
|
|
|
|
|
  LRUCacheOptions co;
  co.capacity = 0;
  co.num_shard_bits = 0;
  co.strict_capacity_limit = false;
  // Needed not to count entry stats collector
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  table_options.block_cache = cache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

  // Load blocks into cache.
  for (size_t i = 0; i + 1 < kNumBlocks; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
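    // The four counters after `options` are the expected deltas since the
    // last check: presumably (misses, hits, inserts, insert failures), as
    // suggested by the strict-capacity case later in this test.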
    CheckCacheCounters(options, 1, 0, 1, 0);
    iterators[i].reset(iter);
  }
  size_t usage = cache->GetUsage();
  ASSERT_LT(0, usage);
  cache->SetCapacity(usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
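  // Capacity now equals pinned usage, so with the strict limit enabled
  // below, any further block insertion should fail with Incomplete status.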

  // Test with strict capacity limit.
  cache->SetStrictCapacityLimit(true);
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_TRUE(iter->status().IsIncomplete());
  CheckCacheCounters(options, 1, 0, 0, 1);
  delete iter;
  iter = nullptr;

  // Release iterators and access cache again.
  for (size_t i = 0; i + 1 < kNumBlocks; i++) {
    iterators[i].reset();
    CheckCacheCounters(options, 0, 0, 0, 0);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  for (size_t i = 0; i + 1 < kNumBlocks; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
    CheckCacheCounters(options, 0, 1, 0, 0);
    iterators[i].reset(iter);
  }
}

#ifdef SNAPPY
TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;
  table_options.block_cache_compressed = nullptr;
  table_options.block_size = 1;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  table_options.cache_index_and_filter_blocks = false;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.compression = CompressionType::kSnappyCompression;

  DestroyAndReopen(options);
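
  // With block_size = 1, each key-value pair lands in its own data block,
  // and each Flush() produces a separate SST file.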
  std::string value(kValueSize, 'a');
  for (size_t i = 0; i < kNumBlocks; i++) {
    ASSERT_OK(Put(ToString(i), value));
    ASSERT_OK(Flush());
  }

  ReadOptions read_options;
  std::shared_ptr<Cache> compressed_cache = NewLRUCache(1 << 25, 0, false);
  LRUCacheOptions co;
  co.capacity = 0;
  co.num_shard_bits = 0;
  co.strict_capacity_limit = false;
  // Needed not to count entry stats collector
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  table_options.block_cache = cache;
  table_options.no_block_cache = false;
  table_options.block_cache_compressed = compressed_cache;
  table_options.max_auto_readahead_size = 0;
  table_options.cache_index_and_filter_blocks = false;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  // Load blocks into cache.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    ASSERT_EQ(value, Get(ToString(i)));
    CheckCacheCounters(options, 1, 0, 1, 0);
    CheckCompressedCacheCounters(options, 1, 0, 1, 0);
  }
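
  // Point reads release their blocks on return, and a zero-capacity cache
  // evicts unpinned entries immediately, so uncompressed usage is back to 0.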
  size_t usage = cache->GetUsage();
  ASSERT_EQ(0, usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
  size_t compressed_usage = compressed_cache->GetUsage();
  ASSERT_LT(0, compressed_usage);
  // Compressed block cache cannot be pinned.
  ASSERT_EQ(0, compressed_cache->GetPinnedUsage());

  // Set strict capacity limit flag. Now blocks will only load into the
  // compressed block cache.
  cache->SetCapacity(usage);
  cache->SetStrictCapacityLimit(true);
  ASSERT_EQ(usage, cache->GetPinnedUsage());

  // Load last key block.
  ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.",
            Get(ToString(kNumBlocks - 1)));
  // Failure will also record the miss counter.
  CheckCacheCounters(options, 1, 0, 0, 1);
  CheckCompressedCacheCounters(options, 1, 0, 1, 0);

  // Clear strict capacity limit flag. This time we shall hit compressed block
  // cache and load into block cache.
  cache->SetStrictCapacityLimit(false);
  // Load last key block.
  ASSERT_EQ(value, Get(ToString(kNumBlocks - 1)));
  CheckCacheCounters(options, 1, 0, 1, 0);
  CheckCompressedCacheCounters(options, 0, 1, 0, 0);
}

namespace {

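// Adapts a Cache to the PersistentCache interface by copying block contents
// in and out; used below to exercise key-space collision checks.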
class PersistentCacheFromCache : public PersistentCache {
 public:
  PersistentCacheFromCache(std::shared_ptr<Cache> cache, bool read_only)
      : cache_(cache), read_only_(read_only) {}

  Status Insert(const Slice& key, const char* data,
                const size_t size) override {
    if (read_only_) {
      return Status::NotSupported();
    }
    std::unique_ptr<char[]> copy{new char[size]};
    std::copy_n(data, size, copy.get());
    Status s = cache_->Insert(
        key, copy.get(), size,
        GetCacheEntryDeleterForRole<char[], CacheEntryRole::kMisc>());
    if (s.ok()) {
      copy.release();
    }
    return s;
  }

  Status Lookup(const Slice& key, std::unique_ptr<char[]>* data,
                size_t* size) override {
    auto handle = cache_->Lookup(key);
    if (handle) {
      char* ptr = static_cast<char*>(cache_->Value(handle));
      *size = cache_->GetCharge(handle);
      data->reset(new char[*size]);
      std::copy_n(ptr, *size, data->get());
      cache_->Release(handle);
      return Status::OK();
    } else {
      return Status::NotFound();
    }
  }

  bool IsCompressed() override { return false; }

  StatsType Stats() override { return StatsType(); }

  std::string GetPrintableOptions() const override { return ""; }

  uint64_t NewId() override { return cache_->NewId(); }

 private:
  std::shared_ptr<Cache> cache_;
  bool read_only_;
};
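
// A CacheWrapper whose Insert unconditionally fails, for exercising
// read-only cache configurations.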
class ReadOnlyCacheWrapper : public CacheWrapper {
  using CacheWrapper::CacheWrapper;

  using Cache::Insert;
  Status Insert(const Slice& /*key*/, void* /*value*/, size_t /*charge*/,
                void (*)(const Slice& key, void* value) /*deleter*/,
                Handle** /*handle*/, Priority /*priority*/) override {
    return Status::NotSupported();
  }
};

}  // namespace

TEST_F(DBBlockCacheTest, TestWithSameCompressed) {
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  std::shared_ptr<Cache> rw_cache{NewLRUCache(1000000)};
  std::shared_ptr<PersistentCacheFromCache> rw_pcache{
      new PersistentCacheFromCache(rw_cache, /*read_only*/ false)};
  // Exercise some obscure behavior with read-only wrappers
  std::shared_ptr<Cache> ro_cache{new ReadOnlyCacheWrapper(rw_cache)};
  std::shared_ptr<PersistentCacheFromCache> ro_pcache{
      new PersistentCacheFromCache(rw_cache, /*read_only*/ true)};
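
  // Sharing one underlying cache between any two of block_cache,
  // block_cache_compressed, and persistent_cache must be rejected at open,
  // since their entries would collide in a single key space.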
  // Simple same pointer
  table_options.block_cache = rw_cache;
  table_options.block_cache_compressed = rw_cache;
  table_options.persistent_cache.reset();
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: block_cache same as block_cache_compressed not "
            "currently supported, and would be bad for performance anyway");

  // Other cases
  table_options.block_cache = ro_cache;
  table_options.block_cache_compressed = rw_cache;
  table_options.persistent_cache.reset();
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: block_cache and block_cache_compressed share "
            "the same key space, which is not supported");

  table_options.block_cache = rw_cache;
  table_options.block_cache_compressed = ro_cache;
  table_options.persistent_cache.reset();
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: block_cache_compressed and block_cache share "
            "the same key space, which is not supported");

  table_options.block_cache = ro_cache;
  table_options.block_cache_compressed.reset();
  table_options.persistent_cache = rw_pcache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: block_cache and persistent_cache share the same "
            "key space, which is not supported");

  table_options.block_cache = rw_cache;
  table_options.block_cache_compressed.reset();
  table_options.persistent_cache = ro_pcache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: persistent_cache and block_cache share the same "
            "key space, which is not supported");

  table_options.block_cache.reset();
  table_options.no_block_cache = true;
  table_options.block_cache_compressed = ro_cache;
  table_options.persistent_cache = rw_pcache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: block_cache_compressed and persistent_cache "
            "share the same key space, which is not supported");

  table_options.block_cache.reset();
  table_options.no_block_cache = true;
  table_options.block_cache_compressed = rw_cache;
  table_options.persistent_cache = ro_pcache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_EQ(TryReopen(options).ToString(),
            "Invalid argument: persistent_cache and block_cache_compressed "
            "share the same key space, which is not supported");
}
#endif  // SNAPPY

#ifndef ROCKSDB_LITE

// Make sure that when options.block_cache is set, after a new table is
// created its index/filter blocks are added to block cache.
TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));

  // index/filter blocks added to block cache right after table creation.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, /* only index/filter were added */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
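  // With cache_index_and_filter_blocks=true, index/filter blocks live in the
  // block cache rather than in table-reader memory, so the readers' memory
  // footprint outside the cache should be zero.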
  uint64_t int_num;
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);

  // Make sure filter block is in cache.
  std::string value;
  ReadOptions ropt;
  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);

  // Miss count should remain the same.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  // Make sure index block is in cache.
  auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 1,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));

  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 2,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}

// With fill_cache = false, fill up the cache, then iterate over the entire
// db, verifying that the dummy entries inserted in
// `BlockBasedTable::NewDataBlockIterator` do not cause heap-use-after-free
// errors in COMPILE_WITH_ASAN=1 runs.
TEST_F(DBBlockCacheTest, FillCacheAndIterateDB) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  std::shared_ptr<Cache> cache = NewLRUCache(10, 0, true);
  table_options.block_cache = cache;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("key1", "val1"));
  ASSERT_OK(Put("key2", "val2"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key3", "val3"));
  ASSERT_OK(Put("key4", "val4"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key5", "val5"));
  ASSERT_OK(Put("key6", "val6"));
  ASSERT_OK(Flush());

  Iterator* iter = nullptr;

  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(0));
  while (iter->Valid()) {
    iter->Next();
  }
  delete iter;
  iter = nullptr;
}

TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  LRUCacheOptions co;
  // 500 bytes are enough to hold the first two blocks
  co.capacity = 500;
  co.num_shard_bits = 0;
  co.strict_capacity_limit = false;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  table_options.block_cache = cache;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20, true));
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "longer_key", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));
  size_t index_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT);
  size_t filter_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT);
  ASSERT_GT(index_bytes_insert, 0);
  ASSERT_GT(filter_bytes_insert, 0);
  ASSERT_EQ(cache->GetUsage(), index_bytes_insert + filter_bytes_insert);
  // set the cache capacity to the current usage
  cache->SetCapacity(index_bytes_insert + filter_bytes_insert);
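
  // The next flush inserts a fresh index and filter block; since capacity
  // now exactly matches usage, the older entries must be evicted to fit.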
  // The index and filter eviction statistics were broken by the refactoring
  // that moved the readers out of the block cache. Disabling these until we
  // can bring the stats back.
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0);
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0);

  // Note that the second key needs to be no longer than the first one.
  // Otherwise the second index block may not fit in cache.
  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));

  // cache evicted old index and block entries
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT),
            index_bytes_insert);
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT),
            filter_bytes_insert);

  // The index and filter eviction statistics were broken by the refactoring
  // that moved the readers out of the block cache. Disabling these until we
  // can bring the stats back.
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT),
  //           index_bytes_insert);
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT),
  //           filter_bytes_insert);
}

#if (defined OS_LINUX || defined OS_WIN)
TEST_F(DBBlockCacheTest, WarmCacheWithDataBlocksDuringFlush) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

  BlockBasedTableOptions table_options;
  table_options.block_cache = NewLRUCache(1 << 25, 0, false);
  table_options.cache_index_and_filter_blocks = false;
  table_options.prepopulate_block_cache =
      BlockBasedTableOptions::PrepopulateBlockCache::kFlushOnly;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);
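
  // With kFlushOnly prepopulation, every flushed data block should already
  // be in the block cache, so each Get() below is a pure cache hit.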
  std::string value(kValueSize, 'a');
  for (size_t i = 1; i <= kNumBlocks; i++) {
    ASSERT_OK(Put(ToString(i), value));
    ASSERT_OK(Flush());
    ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
    ASSERT_EQ(value, Get(ToString(i)));
    ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS));
    ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT));
  }
  // Verify compaction not counted
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), /*begin=*/nullptr,
                              /*end=*/nullptr));
  EXPECT_EQ(kNumBlocks,
            options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
}

// This test caches data, index and filter blocks during flush.
class DBBlockCacheTest1 : public DBTestBase,
                          public ::testing::WithParamInterface<bool> {
 public:
  const size_t kNumBlocks = 10;
  const size_t kValueSize = 100;
  DBBlockCacheTest1() : DBTestBase("db_block_cache_test1", true) {}
};

INSTANTIATE_TEST_CASE_P(DBBlockCacheTest1, DBBlockCacheTest1,
                        ::testing::Bool());
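
// The bool parameter selects whether partitioned index/filter is used.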
TEST_P(DBBlockCacheTest1, WarmCacheWithBlocksDuringFlush) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

  BlockBasedTableOptions table_options;
  table_options.block_cache = NewLRUCache(1 << 25, 0, false);

  bool use_partition = GetParam();
  if (use_partition) {
    table_options.partition_filters = true;
    table_options.index_type =
        BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
  }

  table_options.cache_index_and_filter_blocks = true;
  table_options.prepopulate_block_cache =
      BlockBasedTableOptions::PrepopulateBlockCache::kFlushOnly;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);

  std::string value(kValueSize, 'a');
  for (size_t i = 1; i <= kNumBlocks; i++) {
    ASSERT_OK(Put(ToString(i), value));
    ASSERT_OK(Flush());
    ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
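    // With partitioning, each new file presumably contributes a top-level
    // index/filter block plus one partition, hence two adds per flush.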
    if (use_partition) {
      ASSERT_EQ(2 * i,
                options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
      ASSERT_EQ(2 * i,
                options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
    } else {
      ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
      ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
    }
    ASSERT_EQ(value, Get(ToString(i)));

    ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS));
    ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT));

    ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(i * 3, options.statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT));
    if (use_partition) {
      ASSERT_EQ(i * 3,
                options.statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT));
    } else {
      ASSERT_EQ(i * 2,
                options.statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT));
    }
    ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS));
  }

  // Verify compaction not counted
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), /*begin=*/nullptr,
                              /*end=*/nullptr));
  EXPECT_EQ(kNumBlocks,
            options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
  // Index and filter blocks are automatically warmed when the new table file
  // is automatically opened at the end of compaction. This is not easily
  // disabled so results in the new index and filter blocks being warmed.
  if (use_partition) {
    EXPECT_EQ(2 * (1 + kNumBlocks),
              options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
    EXPECT_EQ(2 * (1 + kNumBlocks),
              options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
  } else {
    EXPECT_EQ(1 + kNumBlocks,
              options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
    EXPECT_EQ(1 + kNumBlocks,
              options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
  }
}

TEST_F(DBBlockCacheTest, DynamicallyWarmCacheDuringFlush) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

  BlockBasedTableOptions table_options;
  table_options.block_cache = NewLRUCache(1 << 25, 0, false);
  table_options.cache_index_and_filter_blocks = false;
  table_options.prepopulate_block_cache =
      BlockBasedTableOptions::PrepopulateBlockCache::kFlushOnly;

  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);
  std::string value(kValueSize, 'a');

  for (size_t i = 1; i <= 5; i++) {
    ASSERT_OK(Put(ToString(i), value));
    ASSERT_OK(Flush());
    ASSERT_EQ(1,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));

    ASSERT_EQ(value, Get(ToString(i)));
    ASSERT_EQ(0,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
    ASSERT_EQ(
        0, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_MISS));
    ASSERT_EQ(1,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_HIT));
  }
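
  // Turn off prepopulation mid-run via SetOptions; subsequent flushes should
  // no longer warm the cache, so the first read of each new key is a miss.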
  ASSERT_OK(dbfull()->SetOptions(
      {{"block_based_table_factory", "{prepopulate_block_cache=kDisable;}"}}));

  for (size_t i = 6; i <= kNumBlocks; i++) {
    ASSERT_OK(Put(ToString(i), value));
    ASSERT_OK(Flush());
    ASSERT_EQ(0,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));

    ASSERT_EQ(value, Get(ToString(i)));
    ASSERT_EQ(1,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
    ASSERT_EQ(
        1, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_MISS));
    ASSERT_EQ(0,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_HIT));
  }
}
#endif

namespace {

// A mock cache that wraps LRUCache and records how many entries have been
// inserted at each priority.
class MockCache : public LRUCache {
 public:
  static uint32_t high_pri_insert_count;
  static uint32_t low_pri_insert_count;

  MockCache()
      : LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/,
                 false /*strict_capacity_limit*/,
                 0.0 /*high_pri_pool_ratio*/) {}

  using ShardedCache::Insert;

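  // Tally the insertion by priority before delegating to the real LRUCache.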
  Status Insert(const Slice& key, void* value,
                const Cache::CacheItemHelper* helper_cb, size_t charge,
                Handle** handle, Priority priority) override {
    DeleterFn delete_cb = helper_cb->del_cb;
    if (priority == Priority::LOW) {
      low_pri_insert_count++;
    } else {
      high_pri_insert_count++;
    }
    return LRUCache::Insert(key, value, charge, delete_cb, handle, priority);
  }
};

uint32_t MockCache::high_pri_insert_count = 0;
uint32_t MockCache::low_pri_insert_count = 0;

}  // anonymous namespace

TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
  for (auto priority : {Cache::Priority::LOW, Cache::Priority::HIGH}) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache.reset(new MockCache());
    table_options.filter_policy.reset(NewBloomFilterPolicy(20));
    table_options.cache_index_and_filter_blocks_with_high_priority =
        priority == Cache::Priority::HIGH ? true : false;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

    MockCache::high_pri_insert_count = 0;
    MockCache::low_pri_insert_count = 0;

    // Create a new table.
    ASSERT_OK(Put("foo", "value"));
    ASSERT_OK(Put("bar", "value"));
    ASSERT_OK(Flush());
    ASSERT_EQ(1, NumTableFilesAtLevel(0));

    // index/filter blocks added to block cache right after table creation.
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(2, /* only index/filter were added */
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0u, MockCache::high_pri_insert_count);
      ASSERT_EQ(2u, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2u, MockCache::high_pri_insert_count);
      ASSERT_EQ(0u, MockCache::low_pri_insert_count);
    }

    // Access data block.
    ASSERT_EQ("value", Get("foo"));

    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(3, /*adding data block*/
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));

    // Data block should be inserted with low priority.
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0u, MockCache::high_pri_insert_count);
      ASSERT_EQ(3u, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2u, MockCache::high_pri_insert_count);
      ASSERT_EQ(1u, MockCache::low_pri_insert_count);
    }
  }
}

namespace {

// An LRUCache wrapper that can falsely report "not found" on Lookup.
// This allows us to manipulate BlockBasedTableReader into thinking
// another thread inserted the data in between Lookup and Insert,
// while mostly preserving the LRUCache interface/behavior.
class LookupLiarCache : public CacheWrapper {
  int nth_lookup_not_found_ = 0;

 public:
  explicit LookupLiarCache(std::shared_ptr<Cache> target)
      : CacheWrapper(std::move(target)) {}

  using Cache::Lookup;
  Handle* Lookup(const Slice& key, Statistics* stats) override {
    if (nth_lookup_not_found_ == 1) {
      nth_lookup_not_found_ = 0;
      return nullptr;
    }
    if (nth_lookup_not_found_ > 1) {
      --nth_lookup_not_found_;
    }
    return CacheWrapper::Lookup(key, stats);
  }

  // 1 == next lookup, 2 == after next, etc.
  void SetNthLookupNotFound(int n) { nth_lookup_not_found_ = n; }
};

}  // anonymous namespace

TEST_F(DBBlockCacheTest, AddRedundantStats) {
  const size_t capacity = size_t{1} << 25;
  const int num_shard_bits = 0;  // 1 shard
  int iterations_tested = 0;
  for (std::shared_ptr<Cache> base_cache :
       {NewLRUCache(capacity, num_shard_bits),
        NewClockCache(capacity, num_shard_bits)}) {
    if (!base_cache) {
      // Skip clock cache when not supported
      continue;
    }
    ++iterations_tested;
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

    std::shared_ptr<LookupLiarCache> cache =
        std::make_shared<LookupLiarCache>(base_cache);

    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache = cache;
    table_options.filter_policy.reset(NewBloomFilterPolicy(50));
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    DestroyAndReopen(options);
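
    // A "lying" lookup miss forces the reader to reload and re-insert a
    // block that is already cached; the *_ADD_REDUNDANT tickers should count
    // exactly those wasted inserts.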
    // Create a new table.
    ASSERT_OK(Put("foo", "value"));
    ASSERT_OK(Put("bar", "value"));
    ASSERT_OK(Flush());
    ASSERT_EQ(1, NumTableFilesAtLevel(0));

    // Normal access filter+index+data.
    ASSERT_EQ("value", Get("foo"));

    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD));
    // --------
    ASSERT_EQ(3, TestGetTickerCount(options, BLOCK_CACHE_ADD));

    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD_REDUNDANT));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD_REDUNDANT));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD_REDUNDANT));
    // --------
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_ADD_REDUNDANT));

    // Again access filter+index+data, but force redundant load+insert on index
    cache->SetNthLookupNotFound(2);
    ASSERT_EQ("value", Get("bar"));

    ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD));
    // --------
    ASSERT_EQ(4, TestGetTickerCount(options, BLOCK_CACHE_ADD));

    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD_REDUNDANT));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD_REDUNDANT));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD_REDUNDANT));
    // --------
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_ADD_REDUNDANT));

    // Access just filter (with high probability), and force redundant
    // load+insert
    cache->SetNthLookupNotFound(1);
    ASSERT_EQ("NOT_FOUND", Get("this key was not added"));

    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD));
    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD));
    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD));
    // --------
    EXPECT_EQ(5, TestGetTickerCount(options, BLOCK_CACHE_ADD));

    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD_REDUNDANT));
    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD_REDUNDANT));
    EXPECT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD_REDUNDANT));
    // --------
    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_ADD_REDUNDANT));

    // Access just data, forcing redundant load+insert
    ReadOptions read_options;
    std::unique_ptr<Iterator> iter{db_->NewIterator(read_options)};
    cache->SetNthLookupNotFound(1);
    iter->SeekToFirst();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key(), "bar");

    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD));
    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD));
    EXPECT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD));
    // --------
    EXPECT_EQ(6, TestGetTickerCount(options, BLOCK_CACHE_ADD));

    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_ADD_REDUNDANT));
    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_ADD_REDUNDANT));
    EXPECT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_ADD_REDUNDANT));
    // --------
    EXPECT_EQ(3, TestGetTickerCount(options, BLOCK_CACHE_ADD_REDUNDANT));
  }
  EXPECT_GE(iterations_tested, 1);
}

TEST_F(DBBlockCacheTest, ParanoidFileChecks) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  options.level0_file_num_compaction_trigger = 2;
  options.paranoid_file_checks = true;
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = false;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "1_key", "val"));
  ASSERT_OK(Put(1, "9_key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));
  ASSERT_EQ(1, /* read and cache data block */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
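
  // Each flush (and the compaction below) re-reads the new file because
  // paranoid_file_checks is on, which is what populates the block cache here.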
  ASSERT_OK(Put(1, "1_key2", "val2"));
  ASSERT_OK(Put(1, "9_key2", "val2"));
  // Create a new SST file. This will further trigger a compaction
  // and generate another file.
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));

  // After disabling options.paranoid_file_checks, no further block is added
  // when a new file is generated.
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "false"}}));

  ASSERT_OK(Put(1, "1_key3", "val3"));
  ASSERT_OK(Put(1, "9_key3", "val3"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(Put(1, "1_key4", "val4"));
  ASSERT_OK(Put(1, "9_key4", "val4"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
}

TEST_F(DBBlockCacheTest, CompressedCache) {
  if (!Snappy_Supported()) {
    return;
  }
  int num_iter = 80;

  // Run this test in four iterations.
  // Iteration 1: only an uncompressed block cache
  // Iteration 2: only a compressed block cache
  // Iteration 3: both block cache and compressed cache
  // Iteration 4: both block cache and compressed cache, but DB is not
  // compressed
  for (int iter = 0; iter < 4; iter++) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.write_buffer_size = 64 * 1024; // small write buffer
|
2020-02-20 20:07:53 +00:00
|
|
|
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
2016-04-18 16:42:50 +00:00
|
|
|
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
switch (iter) {
|
|
|
|
case 0:
|
|
|
|
// only uncompressed block cache
|
|
|
|
table_options.block_cache = NewLRUCache(8 * 1024);
|
|
|
|
table_options.block_cache_compressed = nullptr;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
// no block cache, only compressed cache
|
|
|
|
table_options.no_block_cache = true;
|
|
|
|
table_options.block_cache = nullptr;
|
|
|
|
table_options.block_cache_compressed = NewLRUCache(8 * 1024);
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
// both compressed and uncompressed block cache
|
|
|
|
table_options.block_cache = NewLRUCache(1024);
|
|
|
|
table_options.block_cache_compressed = NewLRUCache(8 * 1024);
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
// both block cache and compressed cache, but DB is not compressed
|
|
|
|
// also, make block cache sizes bigger, to trigger block cache hits
|
|
|
|
table_options.block_cache = NewLRUCache(1024 * 1024);
|
|
|
|
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
options.compression = kNoCompression;
|
|
|
|
break;
|
|
|
|
default:
|
2017-07-17 04:23:33 +00:00
|
|
|
FAIL();
|
2016-04-18 16:42:50 +00:00
|
|
|
}
|
|
|
|
CreateAndReopenWithCF({"pikachu"}, options);
|
|
|
|
// default column family doesn't have block cache
|
|
|
|
Options no_block_cache_opts;
|
|
|
|
no_block_cache_opts.statistics = options.statistics;
|
|
|
|
no_block_cache_opts = CurrentOptions(no_block_cache_opts);
|
|
|
|
BlockBasedTableOptions table_options_no_bc;
|
|
|
|
table_options_no_bc.no_block_cache = true;
|
|
|
|
no_block_cache_opts.table_factory.reset(
|
|
|
|
NewBlockBasedTableFactory(table_options_no_bc));
|
|
|
|
ReopenWithColumnFamilies(
|
|
|
|
{"default", "pikachu"},
|
|
|
|
std::vector<Options>({no_block_cache_opts, options}));
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
|
|
|
|
// Write 8MB (80 values, each 100K)
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
|
|
|
|
std::vector<std::string> values;
|
|
|
|
std::string str;
|
|
|
|
for (int i = 0; i < num_iter; i++) {
|
|
|
|
if (i % 4 == 0) { // high compression ratio
|
2020-07-09 21:33:42 +00:00
|
|
|
str = rnd.RandomString(1000);
|
2016-04-18 16:42:50 +00:00
|
|
|
}
|
|
|
|
values.push_back(str);
|
|
|
|
ASSERT_OK(Put(1, Key(i), values[i]));
|
|
|
|
}
|
|
|
|
|
|
|
|
// flush all data from memtable so that reads are from block cache
|
|
|
|
ASSERT_OK(Flush(1));
|
|
|
|
|
|
|
|
for (int i = 0; i < num_iter; i++) {
|
|
|
|
ASSERT_EQ(Get(1, Key(i)), values[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that we triggered the appropriate code paths in the cache
|
|
|
|
switch (iter) {
|
|
|
|
case 0:
|
|
|
|
// only uncompressed block cache
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
|
|
|
|
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
// no block cache, only compressed cache
|
|
|
|
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
// both compressed and uncompressed block cache
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
// both compressed and uncompressed block cache
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0);
|
|
|
|
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
|
|
|
|
// compressed doesn't have any hits since blocks are not compressed on
|
|
|
|
// storage
|
|
|
|
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0);
|
|
|
|
break;
|
|
|
|
default:
|
2017-07-17 04:23:33 +00:00
|
|
|
FAIL();
|
2016-04-18 16:42:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
options.create_if_missing = true;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
}
|
|
|
|
}
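
// A minimal configuration sketch (sizes are illustrative assumptions) for
// pairing an uncompressed block cache with a compressed block cache, as the
// iterations above do:
//
//   BlockBasedTableOptions bbto;
//   bbto.block_cache = NewLRUCache(1024 * 1024);                 // uncompressed
//   bbto.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);  // compressed
//   Options opts;
//   opts.table_factory.reset(NewBlockBasedTableFactory(bbto));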

TEST_F(DBBlockCacheTest, CacheCompressionDict) {
  const int kNumFiles = 4;
  const int kNumEntriesPerFile = 128;
  const int kNumBytesPerEntry = 1024;

  // Try all the available libraries that support dictionary compression
  std::vector<CompressionType> compression_types;
  if (Zlib_Supported()) {
    compression_types.push_back(kZlibCompression);
  }
  if (LZ4_Supported()) {
    compression_types.push_back(kLZ4Compression);
    compression_types.push_back(kLZ4HCCompression);
  }
  if (ZSTD_Supported()) {
    compression_types.push_back(kZSTD);
  } else if (ZSTDNotFinal_Supported()) {
    compression_types.push_back(kZSTDNotFinalCompression);
  }
  Random rnd(301);
  for (auto compression_type : compression_types) {
    Options options = CurrentOptions();
    options.bottommost_compression = compression_type;
    options.bottommost_compression_opts.max_dict_bytes = 4096;
    options.bottommost_compression_opts.enabled = true;
    options.create_if_missing = true;
    options.num_levels = 2;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    options.target_file_size_base = kNumEntriesPerFile * kNumBytesPerEntry;
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache.reset(new MockCache());
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

    RecordCacheCountersForCompressionDict(options);

    for (int i = 0; i < kNumFiles; ++i) {
      ASSERT_EQ(i, NumTableFilesAtLevel(0, 0));
      for (int j = 0; j < kNumEntriesPerFile; ++j) {
        std::string value = rnd.RandomString(kNumBytesPerEntry);
        ASSERT_OK(Put(Key(j * kNumFiles + i), value));
      }
      ASSERT_OK(Flush());
    }
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(0, NumTableFilesAtLevel(0));
    ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(1));

    // Compression dictionary blocks are preloaded.
    CheckCacheCountersForCompressionDict(
        options, kNumFiles /* expected_compression_dict_misses */,
        0 /* expected_compression_dict_hits */,
        kNumFiles /* expected_compression_dict_inserts */);

    // Read a key from a file. It should cause the SST's dictionary meta-block
    // to be read.
    RecordCacheCounters(options);
    RecordCacheCountersForCompressionDict(options);
    ReadOptions read_options;
    ASSERT_NE("NOT_FOUND", Get(Key(kNumFiles * kNumEntriesPerFile - 1)));
    // Two block hits: index and dictionary, since they are prefetched.
    // One block missed/added: data block
    CheckCacheCounters(options, 1 /* expected_misses */, 2 /* expected_hits */,
                       1 /* expected_inserts */, 0 /* expected_failures */);
    CheckCacheCountersForCompressionDict(
        options, 0 /* expected_compression_dict_misses */,
        1 /* expected_compression_dict_hits */,
        0 /* expected_compression_dict_inserts */);
  }
}
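
// A hedged configuration sketch (illustrative values) of enabling a per-SST
// compression dictionary on the bottommost level, matching the options the
// test above sets for each supported compression library:
//
//   Options opts;
//   opts.bottommost_compression = kZSTD;  // or another supported type
//   opts.bottommost_compression_opts.max_dict_bytes = 4096;
//   opts.bottommost_compression_opts.enabled = true;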

static void ClearCache(Cache* cache) {
  // Collect the keys of all entries whose deleter maps to a recognized block
  // cache role, then erase them. Entries with unrecognized deleters (e.g. the
  // CacheEntryStatsCollector) are left in place.
  auto roles = CopyCacheDeleterRoleMap();
  std::deque<std::string> keys;
  Cache::ApplyToAllEntriesOptions opts;
  auto callback = [&](const Slice& key, void* /*value*/, size_t /*charge*/,
                      Cache::DeleterFn deleter) {
    if (roles.find(deleter) == roles.end()) {
      // Keep the stats collector
      return;
    }
    keys.push_back(key.ToString());
  };
  cache->ApplyToAllEntries(callback, opts);
  for (auto& k : keys) {
    cache->Erase(k);
  }
}
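
// The same Cache::ApplyToAllEntries pattern used by ClearCache() above can
// gather other aggregates; a hedged sketch (assumes a valid `cache` pointer)
// that counts entries instead of erasing them:
//
//   size_t num_entries = 0;
//   Cache::ApplyToAllEntriesOptions count_opts;
//   cache->ApplyToAllEntries(
//       [&](const Slice& /*key*/, void* /*value*/, size_t /*charge*/,
//           Cache::DeleterFn /*deleter*/) { ++num_entries; },
//       count_opts);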
TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
|
|
|
|
const size_t capacity = size_t{1} << 25;
|
|
|
|
int iterations_tested = 0;
|
|
|
|
for (bool partition : {false, true}) {
|
|
|
|
for (std::shared_ptr<Cache> cache :
|
|
|
|
{NewLRUCache(capacity), NewClockCache(capacity)}) {
|
|
|
|
if (!cache) {
|
|
|
|
// Skip clock cache when not supported
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
++iterations_tested;
|
|
|
|
|
|
|
|
Options options = CurrentOptions();
|
2021-06-08 12:02:29 +00:00
|
|
|
SetTimeElapseOnlySleepOnReopen(&options);
|
Use deleters to label cache entries and collect stats (#8297)
Summary:
This change gathers and publishes statistics about the
kinds of items in block cache. This is especially important for
profiling relative usage of cache by index vs. filter vs. data blocks.
It works by iterating over the cache during periodic stats dump
(InternalStats, stats_dump_period_sec) or on demand when
DB::Get(Map)Property(kBlockCacheEntryStats), except that for
efficiency and sharing among column families, saved data from
the last scan is used when the data is not considered too old.
The new information can be seen in info LOG, for example:
Block cache LRUCache@0x7fca62229330 capacity: 95.37 MB collections: 8 last_copies: 0 last_secs: 0.00178 secs_since: 0
Block cache entry stats(count,size,portion): DataBlock(7092,28.24 MB,29.6136%) FilterBlock(215,867.90 KB,0.888728%) FilterMetaBlock(2,5.31 KB,0.00544%) IndexBlock(217,180.11 KB,0.184432%) WriteBuffer(1,256.00 KB,0.262144%) Misc(1,0.00 KB,0%)
And also through DB::GetProperty and GetMapProperty (here using
ldb just for demonstration):
$ ./ldb --db=/dev/shm/dbbench/ get_property rocksdb.block-cache-entry-stats
rocksdb.block-cache-entry-stats.bytes.data-block: 0
rocksdb.block-cache-entry-stats.bytes.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-meta-block: 0
rocksdb.block-cache-entry-stats.bytes.index-block: 178992
rocksdb.block-cache-entry-stats.bytes.misc: 0
rocksdb.block-cache-entry-stats.bytes.other-block: 0
rocksdb.block-cache-entry-stats.bytes.write-buffer: 0
rocksdb.block-cache-entry-stats.capacity: 8388608
rocksdb.block-cache-entry-stats.count.data-block: 0
rocksdb.block-cache-entry-stats.count.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-meta-block: 0
rocksdb.block-cache-entry-stats.count.index-block: 215
rocksdb.block-cache-entry-stats.count.misc: 1
rocksdb.block-cache-entry-stats.count.other-block: 0
rocksdb.block-cache-entry-stats.count.write-buffer: 0
rocksdb.block-cache-entry-stats.id: LRUCache@0x7f3636661290
rocksdb.block-cache-entry-stats.percent.data-block: 0.000000
rocksdb.block-cache-entry-stats.percent.deprecated-filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-meta-block: 0.000000
rocksdb.block-cache-entry-stats.percent.index-block: 2.133751
rocksdb.block-cache-entry-stats.percent.misc: 0.000000
rocksdb.block-cache-entry-stats.percent.other-block: 0.000000
rocksdb.block-cache-entry-stats.percent.write-buffer: 0.000000
rocksdb.block-cache-entry-stats.secs_for_last_collection: 0.000052
rocksdb.block-cache-entry-stats.secs_since_last_collection: 0
Solution detail - We need some way to flag what kind of blocks each
entry belongs to, preferably without changing the Cache API.
One of the complications is that Cache is a general interface that could
have other users that don't adhere to whichever convention we decide
on for keys and values. Or we would pay for an extra field in the Handle
that would only be used for this purpose.
This change uses a back-door approach, the deleter, to indicate the
"role" of a Cache entry (in addition to the value type, implicitly).
This has the added benefit of ensuring proper code origin whenever we
recognize a particular role for a cache entry; if the entry came from
some other part of the code, it will use an unrecognized deleter, which
we simply attribute to the "Misc" role.
An internal API makes for simple instantiation and automatic
registration of Cache deleters for a given value type and "role".
Another internal API, CacheEntryStatsCollector, solves the problem of
caching the results of a scan and sharing them, to ensure scans are
neither excessive nor redundant so as not to harm Cache performance.
Because code is added to BlocklikeTraits, it is pulled out of
block_based_table_reader.cc into its own file.
This is a reformulation of https://github.com/facebook/rocksdb/issues/8276, without the type checking option
(could still be added), and with actual stat gathering.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8297
Test Plan: manual testing with db_bench, and a couple of basic unit tests
Reviewed By: ltamasi
Differential Revision: D28488721
Pulled By: pdillinger
fbshipit-source-id: 472f524a9691b5afb107934be2d41d84f2b129fb
2021-05-19 23:45:51 +00:00
|
|
|
options.create_if_missing = true;
|
|
|
|
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
|
|
|
options.max_open_files = 13;
|
|
|
|
options.table_cache_numshardbits = 0;
|
2021-06-14 15:14:21 +00:00
|
|
|
// If this wakes up, it could interfere with test
|
|
|
|
options.stats_dump_period_sec = 0;
|
Use deleters to label cache entries and collect stats (#8297)
Summary:
This change gathers and publishes statistics about the
kinds of items in block cache. This is especially important for
profiling relative usage of cache by index vs. filter vs. data blocks.
It works by iterating over the cache during periodic stats dump
(InternalStats, stats_dump_period_sec) or on demand when
DB::Get(Map)Property(kBlockCacheEntryStats), except that for
efficiency and sharing among column families, saved data from
the last scan is used when the data is not considered too old.
The new information can be seen in info LOG, for example:
Block cache LRUCache@0x7fca62229330 capacity: 95.37 MB collections: 8 last_copies: 0 last_secs: 0.00178 secs_since: 0
Block cache entry stats(count,size,portion): DataBlock(7092,28.24 MB,29.6136%) FilterBlock(215,867.90 KB,0.888728%) FilterMetaBlock(2,5.31 KB,0.00544%) IndexBlock(217,180.11 KB,0.184432%) WriteBuffer(1,256.00 KB,0.262144%) Misc(1,0.00 KB,0%)
And also through DB::GetProperty and GetMapProperty (here using
ldb just for demonstration):
$ ./ldb --db=/dev/shm/dbbench/ get_property rocksdb.block-cache-entry-stats
rocksdb.block-cache-entry-stats.bytes.data-block: 0
rocksdb.block-cache-entry-stats.bytes.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-meta-block: 0
rocksdb.block-cache-entry-stats.bytes.index-block: 178992
rocksdb.block-cache-entry-stats.bytes.misc: 0
rocksdb.block-cache-entry-stats.bytes.other-block: 0
rocksdb.block-cache-entry-stats.bytes.write-buffer: 0
rocksdb.block-cache-entry-stats.capacity: 8388608
rocksdb.block-cache-entry-stats.count.data-block: 0
rocksdb.block-cache-entry-stats.count.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-meta-block: 0
rocksdb.block-cache-entry-stats.count.index-block: 215
rocksdb.block-cache-entry-stats.count.misc: 1
rocksdb.block-cache-entry-stats.count.other-block: 0
rocksdb.block-cache-entry-stats.count.write-buffer: 0
rocksdb.block-cache-entry-stats.id: LRUCache@0x7f3636661290
rocksdb.block-cache-entry-stats.percent.data-block: 0.000000
rocksdb.block-cache-entry-stats.percent.deprecated-filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-meta-block: 0.000000
rocksdb.block-cache-entry-stats.percent.index-block: 2.133751
rocksdb.block-cache-entry-stats.percent.misc: 0.000000
rocksdb.block-cache-entry-stats.percent.other-block: 0.000000
rocksdb.block-cache-entry-stats.percent.write-buffer: 0.000000
rocksdb.block-cache-entry-stats.secs_for_last_collection: 0.000052
rocksdb.block-cache-entry-stats.secs_since_last_collection: 0
Solution detail - We need some way to flag what kind of blocks each
entry belongs to, preferably without changing the Cache API.
One of the complications is that Cache is a general interface that could
have other users that don't adhere to whichever convention we decide
on for keys and values. Or we would pay for an extra field in the Handle
that would only be used for this purpose.
This change uses a back-door approach, the deleter, to indicate the
"role" of a Cache entry (in addition to the value type, implicitly).
This has the added benefit of ensuring proper code origin whenever we
recognize a particular role for a cache entry; if the entry came from
some other part of the code, it will use an unrecognized deleter, which
we simply attribute to the "Misc" role.
An internal API makes for simple instantiation and automatic
registration of Cache deleters for a given value type and "role".
Another internal API, CacheEntryStatsCollector, solves the problem of
caching the results of a scan and sharing them, to ensure scans are
neither excessive nor redundant so as not to harm Cache performance.
Because code is added to BlocklikeTraits, it is pulled out of
block_based_table_reader.cc into its own file.
This is a reformulation of https://github.com/facebook/rocksdb/issues/8276, without the type checking option
(could still be added), and with actual stat gathering.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8297
Test Plan: manual testing with db_bench, and a couple of basic unit tests
Reviewed By: ltamasi
Differential Revision: D28488721
Pulled By: pdillinger
fbshipit-source-id: 472f524a9691b5afb107934be2d41d84f2b129fb
2021-05-19 23:45:51 +00:00
|
|
|
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_cache = cache;
|
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(50));
|
|
|
|
if (partition) {
|
|
|
|
table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
|
|
|
|
table_options.partition_filters = true;
|
|
|
|
}
|
|
|
|
table_options.metadata_cache_options.top_level_index_pinning =
|
|
|
|
PinningTier::kNone;
|
|
|
|
table_options.metadata_cache_options.partition_pinning =
|
|
|
|
PinningTier::kNone;
|
|
|
|
table_options.metadata_cache_options.unpartitioned_pinning =
|
|
|
|
PinningTier::kNone;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
// Create a new table.
|
|
|
|
ASSERT_OK(Put("foo", "value"));
|
|
|
|
ASSERT_OK(Put("bar", "value"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
ASSERT_OK(Put("zfoo", "value"));
|
|
|
|
ASSERT_OK(Put("zbar", "value"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
ASSERT_EQ(2, NumTableFilesAtLevel(0));
|
|
|
|
|
|
|
|
// Fresh cache
|
|
|
|
ClearCache(cache.get());
|
|
|
|
|
|
|
|
std::array<size_t, kNumCacheEntryRoles> expected{};
|
|
|
|
// For CacheEntryStatsCollector
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kMisc)] = 1;
|
2021-06-14 15:14:21 +00:00
|
|
|
EXPECT_EQ(expected, GetCacheEntryRoleCountsBg());
|
Use deleters to label cache entries and collect stats (#8297)
Summary:
This change gathers and publishes statistics about the
kinds of items in block cache. This is especially important for
profiling relative usage of cache by index vs. filter vs. data blocks.
It works by iterating over the cache during periodic stats dump
(InternalStats, stats_dump_period_sec) or on demand when
DB::Get(Map)Property(kBlockCacheEntryStats), except that for
efficiency and sharing among column families, saved data from
the last scan is used when the data is not considered too old.
The new information can be seen in info LOG, for example:
Block cache LRUCache@0x7fca62229330 capacity: 95.37 MB collections: 8 last_copies: 0 last_secs: 0.00178 secs_since: 0
Block cache entry stats(count,size,portion): DataBlock(7092,28.24 MB,29.6136%) FilterBlock(215,867.90 KB,0.888728%) FilterMetaBlock(2,5.31 KB,0.00544%) IndexBlock(217,180.11 KB,0.184432%) WriteBuffer(1,256.00 KB,0.262144%) Misc(1,0.00 KB,0%)
And also through DB::GetProperty and GetMapProperty (here using
ldb just for demonstration):
$ ./ldb --db=/dev/shm/dbbench/ get_property rocksdb.block-cache-entry-stats
rocksdb.block-cache-entry-stats.bytes.data-block: 0
rocksdb.block-cache-entry-stats.bytes.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-meta-block: 0
rocksdb.block-cache-entry-stats.bytes.index-block: 178992
rocksdb.block-cache-entry-stats.bytes.misc: 0
rocksdb.block-cache-entry-stats.bytes.other-block: 0
rocksdb.block-cache-entry-stats.bytes.write-buffer: 0
rocksdb.block-cache-entry-stats.capacity: 8388608
rocksdb.block-cache-entry-stats.count.data-block: 0
rocksdb.block-cache-entry-stats.count.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-meta-block: 0
rocksdb.block-cache-entry-stats.count.index-block: 215
rocksdb.block-cache-entry-stats.count.misc: 1
rocksdb.block-cache-entry-stats.count.other-block: 0
rocksdb.block-cache-entry-stats.count.write-buffer: 0
rocksdb.block-cache-entry-stats.id: LRUCache@0x7f3636661290
rocksdb.block-cache-entry-stats.percent.data-block: 0.000000
rocksdb.block-cache-entry-stats.percent.deprecated-filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-meta-block: 0.000000
rocksdb.block-cache-entry-stats.percent.index-block: 2.133751
rocksdb.block-cache-entry-stats.percent.misc: 0.000000
rocksdb.block-cache-entry-stats.percent.other-block: 0.000000
rocksdb.block-cache-entry-stats.percent.write-buffer: 0.000000
rocksdb.block-cache-entry-stats.secs_for_last_collection: 0.000052
rocksdb.block-cache-entry-stats.secs_since_last_collection: 0
Solution detail - We need some way to flag what kind of blocks each
entry belongs to, preferably without changing the Cache API.
One of the complications is that Cache is a general interface that could
have other users that don't adhere to whichever convention we decide
on for keys and values. Or we would pay for an extra field in the Handle
that would only be used for this purpose.
This change uses a back-door approach, the deleter, to indicate the
"role" of a Cache entry (in addition to the value type, implicitly).
This has the added benefit of ensuring proper code origin whenever we
recognize a particular role for a cache entry; if the entry came from
some other part of the code, it will use an unrecognized deleter, which
we simply attribute to the "Misc" role.
An internal API makes for simple instantiation and automatic
registration of Cache deleters for a given value type and "role".
Another internal API, CacheEntryStatsCollector, solves the problem of
caching the results of a scan and sharing them, to ensure scans are
neither excessive nor redundant so as not to harm Cache performance.
Because code is added to BlocklikeTraits, it is pulled out of
block_based_table_reader.cc into its own file.
This is a reformulation of https://github.com/facebook/rocksdb/issues/8276, without the type checking option
(could still be added), and with actual stat gathering.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8297
Test Plan: manual testing with db_bench, and a couple of basic unit tests
Reviewed By: ltamasi
Differential Revision: D28488721
Pulled By: pdillinger
fbshipit-source-id: 472f524a9691b5afb107934be2d41d84f2b129fb
2021-05-19 23:45:51 +00:00
|
|
|
|
2021-06-08 12:02:29 +00:00
|
|
|
std::array<size_t, kNumCacheEntryRoles> prev_expected = expected;
|
|
|
|
|
Use deleters to label cache entries and collect stats (#8297)
Summary:
This change gathers and publishes statistics about the
kinds of items in block cache. This is especially important for
profiling relative usage of cache by index vs. filter vs. data blocks.
It works by iterating over the cache during periodic stats dump
(InternalStats, stats_dump_period_sec) or on demand when
DB::Get(Map)Property(kBlockCacheEntryStats), except that for
efficiency and sharing among column families, saved data from
the last scan is used when the data is not considered too old.
The new information can be seen in info LOG, for example:
Block cache LRUCache@0x7fca62229330 capacity: 95.37 MB collections: 8 last_copies: 0 last_secs: 0.00178 secs_since: 0
Block cache entry stats(count,size,portion): DataBlock(7092,28.24 MB,29.6136%) FilterBlock(215,867.90 KB,0.888728%) FilterMetaBlock(2,5.31 KB,0.00544%) IndexBlock(217,180.11 KB,0.184432%) WriteBuffer(1,256.00 KB,0.262144%) Misc(1,0.00 KB,0%)
And also through DB::GetProperty and GetMapProperty (here using
ldb just for demonstration):
$ ./ldb --db=/dev/shm/dbbench/ get_property rocksdb.block-cache-entry-stats
rocksdb.block-cache-entry-stats.bytes.data-block: 0
rocksdb.block-cache-entry-stats.bytes.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-block: 0
rocksdb.block-cache-entry-stats.bytes.filter-meta-block: 0
rocksdb.block-cache-entry-stats.bytes.index-block: 178992
rocksdb.block-cache-entry-stats.bytes.misc: 0
rocksdb.block-cache-entry-stats.bytes.other-block: 0
rocksdb.block-cache-entry-stats.bytes.write-buffer: 0
rocksdb.block-cache-entry-stats.capacity: 8388608
rocksdb.block-cache-entry-stats.count.data-block: 0
rocksdb.block-cache-entry-stats.count.deprecated-filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-block: 0
rocksdb.block-cache-entry-stats.count.filter-meta-block: 0
rocksdb.block-cache-entry-stats.count.index-block: 215
rocksdb.block-cache-entry-stats.count.misc: 1
rocksdb.block-cache-entry-stats.count.other-block: 0
rocksdb.block-cache-entry-stats.count.write-buffer: 0
rocksdb.block-cache-entry-stats.id: LRUCache@0x7f3636661290
rocksdb.block-cache-entry-stats.percent.data-block: 0.000000
rocksdb.block-cache-entry-stats.percent.deprecated-filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-block: 0.000000
rocksdb.block-cache-entry-stats.percent.filter-meta-block: 0.000000
rocksdb.block-cache-entry-stats.percent.index-block: 2.133751
rocksdb.block-cache-entry-stats.percent.misc: 0.000000
rocksdb.block-cache-entry-stats.percent.other-block: 0.000000
rocksdb.block-cache-entry-stats.percent.write-buffer: 0.000000
rocksdb.block-cache-entry-stats.secs_for_last_collection: 0.000052
rocksdb.block-cache-entry-stats.secs_since_last_collection: 0
Solution detail - We need some way to flag what kind of blocks each
entry belongs to, preferably without changing the Cache API.
One of the complications is that Cache is a general interface that could
have other users that don't adhere to whichever convention we decide
on for keys and values. Or we would pay for an extra field in the Handle
that would only be used for this purpose.
This change uses a back-door approach, the deleter, to indicate the
"role" of a Cache entry (in addition to the value type, implicitly).
This has the added benefit of ensuring proper code origin whenever we
recognize a particular role for a cache entry; if the entry came from
some other part of the code, it will use an unrecognized deleter, which
we simply attribute to the "Misc" role.
An internal API makes for simple instantiation and automatic
registration of Cache deleters for a given value type and "role".
Another internal API, CacheEntryStatsCollector, solves the problem of
caching the results of a scan and sharing them, to ensure scans are
neither excessive nor redundant so as not to harm Cache performance.
Because code is added to BlocklikeTraits, it is pulled out of
block_based_table_reader.cc into its own file.
This is a reformulation of https://github.com/facebook/rocksdb/issues/8276, without the type checking option
(could still be added), and with actual stat gathering.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8297
Test Plan: manual testing with db_bench, and a couple of basic unit tests
Reviewed By: ltamasi
Differential Revision: D28488721
Pulled By: pdillinger
fbshipit-source-id: 472f524a9691b5afb107934be2d41d84f2b129fb
2021-05-19 23:45:51 +00:00
|
|
|
// First access only filters
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("different from any key added"));
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kFilterBlock)] += 2;
|
|
|
|
if (partition) {
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kFilterMetaBlock)] += 2;
|
|
|
|
}
|
2021-06-08 12:02:29 +00:00
|
|
|
// Within some time window, we will get cached entry stats
|
2021-06-14 15:14:21 +00:00
|
|
|
EXPECT_EQ(prev_expected, GetCacheEntryRoleCountsBg());
|
2021-06-08 12:02:29 +00:00
|
|
|
// Not enough to force a miss
|
2021-06-14 15:14:21 +00:00
|
|
|
env_->MockSleepForSeconds(45);
|
|
|
|
EXPECT_EQ(prev_expected, GetCacheEntryRoleCountsBg());
|
2021-06-08 12:02:29 +00:00
|
|
|
// Enough to force a miss
|
2021-06-14 15:14:21 +00:00
|
|
|
env_->MockSleepForSeconds(601);
|
|
|
|
EXPECT_EQ(expected, GetCacheEntryRoleCountsBg());
|
2021-05-19 23:45:51 +00:00
|
|
|
|
|
|
|
// Now access index and data block
|
|
|
|
ASSERT_EQ("value", Get("foo"));
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]++;
|
|
|
|
if (partition) {
|
|
|
|
// top-level
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]++;
|
|
|
|
}
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kDataBlock)]++;
|
2021-06-14 15:14:21 +00:00
|
|
|
// Enough to force a miss
|
|
|
|
env_->MockSleepForSeconds(601);
|
|
|
|
// But inject a simulated long scan so that we need a longer
|
|
|
|
// interval to force a miss next time.
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"CacheEntryStatsCollector::GetStats:AfterApplyToAllEntries",
|
|
|
|
[this](void*) {
|
|
|
|
// To spend no more than 0.2% of time scanning, we would need
|
|
|
|
// interval of at least 10000s
|
|
|
|
env_->MockSleepForSeconds(20);
|
|
|
|
});
|
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
EXPECT_EQ(expected, GetCacheEntryRoleCountsBg());
|
|
|
|
prev_expected = expected;
|
|
|
|
SyncPoint::GetInstance()->DisableProcessing();
|
|
|
|
SyncPoint::GetInstance()->ClearAllCallBacks();
|
2021-05-19 23:45:51 +00:00
|
|
|
|
|
|
|
// The same for other file
|
|
|
|
ASSERT_EQ("value", Get("zfoo"));
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]++;
|
|
|
|
if (partition) {
|
|
|
|
// top-level
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]++;
|
|
|
|
}
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kDataBlock)]++;
|
2021-06-14 15:14:21 +00:00
|
|
|
// Because of the simulated long scan, this is not enough to force
|
|
|
|
// a miss
|
|
|
|
env_->MockSleepForSeconds(601);
|
|
|
|
EXPECT_EQ(prev_expected, GetCacheEntryRoleCountsBg());
|
|
|
|
// But this is enough
|
|
|
|
env_->MockSleepForSeconds(10000);
|
|
|
|
EXPECT_EQ(expected, GetCacheEntryRoleCountsBg());
|
|
|
|
prev_expected = expected;
|
2021-05-19 23:45:51 +00:00
|
|
|
|
|
|
|
// Also check the GetProperty interface
|
|
|
|
std::map<std::string, std::string> values;
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
|
|
|
|
|
|
|
|
EXPECT_EQ(
|
|
|
|
ToString(expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]),
|
|
|
|
values["count.index-block"]);
|
|
|
|
EXPECT_EQ(
|
|
|
|
ToString(expected[static_cast<size_t>(CacheEntryRole::kDataBlock)]),
|
|
|
|
values["count.data-block"]);
|
|
|
|
EXPECT_EQ(
|
|
|
|
ToString(expected[static_cast<size_t>(CacheEntryRole::kFilterBlock)]),
|
|
|
|
values["count.filter-block"]);
|
2021-06-14 15:14:21 +00:00
|
|
|
EXPECT_EQ(
|
|
|
|
ToString(
|
|
|
|
prev_expected[static_cast<size_t>(CacheEntryRole::kWriteBuffer)]),
|
|
|
|
values["count.write-buffer"]);
|
2021-05-19 23:45:51 +00:00
|
|
|
EXPECT_EQ(ToString(expected[static_cast<size_t>(CacheEntryRole::kMisc)]),
|
|
|
|
values["count.misc"]);
|
2021-06-14 15:14:21 +00:00
|
|
|
|
|
|
|
// Add one for kWriteBuffer
|
|
|
|
{
|
|
|
|
WriteBufferManager wbm(size_t{1} << 20, cache);
|
|
|
|
wbm.ReserveMem(1024);
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kWriteBuffer)]++;
|
|
|
|
// Now we check that the GetProperty interface is more aggressive about
|
|
|
|
// re-scanning stats, but not totally aggressive.
|
|
|
|
// Within some time window, we will get cached entry stats
|
|
|
|
env_->MockSleepForSeconds(1);
|
|
|
|
EXPECT_EQ(ToString(prev_expected[static_cast<size_t>(
|
|
|
|
CacheEntryRole::kWriteBuffer)]),
|
|
|
|
values["count.write-buffer"]);
|
|
|
|
// Not enough for a "background" miss but enough for a "foreground" miss
|
|
|
|
env_->MockSleepForSeconds(45);
|
|
|
|
|
|
|
|
ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats,
|
|
|
|
&values));
|
|
|
|
EXPECT_EQ(
|
|
|
|
ToString(
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kWriteBuffer)]),
|
|
|
|
values["count.write-buffer"]);
|
|
|
|
}
|
|
|
|
prev_expected = expected;
|
|
|
|
|
|
|
|
// With collector pinned in cache, we should be able to hit
|
|
|
|
// even if the cache is full
|
|
|
|
ClearCache(cache.get());
|
|
|
|
Cache::Handle* h = nullptr;
|
|
|
|
ASSERT_OK(cache->Insert("Fill-it-up", nullptr, capacity + 1,
|
|
|
|
GetNoopDeleterForRole<CacheEntryRole::kMisc>(),
|
|
|
|
&h, Cache::Priority::HIGH));
|
|
|
|
ASSERT_GT(cache->GetUsage(), cache->GetCapacity());
|
|
|
|
expected = {};
|
Don't hold DB mutex for block cache entry stat scans (#8538)
Summary:
I previously didn't notice the DB mutex was being held during
block cache entry stat scans, probably because I primarily checked for
read performance regressions, which require the block cache and
are traditionally latency-sensitive.
This change does some refactoring to avoid holding DB mutex and to
avoid triggering and waiting for a scan in GetProperty("rocksdb.cfstats").
Some tests have to be updated because now the stats collector is
populated in the Cache aggressively on DB startup rather than lazily.
(I hope to clean up some of this added complexity in the future.)
This change also ensures proper treatment of need_out_of_mutex for
non-int DB properties.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8538
Test Plan:
Added unit test logic that uses sync points to fail if the DB mutex
is held during a scan, covering the various ways that a scan might be
triggered.
Performance test - the known impact of holding the DB mutex is on
TransactionDB, and the easiest way to see the impact is to hack the
scan code to almost always miss and take an artificially long time
scanning. Here I've injected an unconditional 5s sleep at the call to
ApplyToAllEntries.
Before (hacked):
$ TEST_TMPDIR=/dev/shm ./db_bench.base_xxx -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 433.219 micros/op 2308 ops/sec; 0.1 MB/s ( transactions:78999 aborts:0)
rocksdb.db.write.micros P50 : 16.135883 P95 : 36.622503 P99 : 66.036115 P100 : 5000614.000000 COUNT : 149677 SUM : 8364856
$ TEST_TMPDIR=/dev/shm ./db_bench.base_xxx -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 448.802 micros/op 2228 ops/sec; 0.1 MB/s ( transactions:75999 aborts:0)
rocksdb.db.write.micros P50 : 16.629221 P95 : 37.320607 P99 : 72.144341 P100 : 5000871.000000 COUNT : 143995 SUM : 13472323
Notice the 5s P100 write time.
After (hacked):
$ TEST_TMPDIR=/dev/shm ./db_bench.new_xxx -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 303.645 micros/op 3293 ops/sec; 0.1 MB/s ( transactions:98999 aborts:0)
rocksdb.db.write.micros P50 : 16.061871 P95 : 33.978834 P99 : 60.018017 P100 : 616315.000000 COUNT : 187619 SUM : 4097407
$ TEST_TMPDIR=/dev/shm ./db_bench.new_xxx -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 310.383 micros/op 3221 ops/sec; 0.1 MB/s ( transactions:96999 aborts:0)
rocksdb.db.write.micros P50 : 16.270026 P95 : 35.786844 P99 : 64.302878 P100 : 603088.000000 COUNT : 183819 SUM : 4095918
P100 write is now ~0.6s. Not good, but it's the same even if I completely bypass all the scanning code:
$ TEST_TMPDIR=/dev/shm ./db_bench.new_skip -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 311.365 micros/op 3211 ops/sec; 0.1 MB/s ( transactions:96999 aborts:0)
rocksdb.db.write.micros P50 : 16.274362 P95 : 36.221184 P99 : 68.809783 P100 : 649808.000000 COUNT : 183819 SUM : 4156767
$ TEST_TMPDIR=/dev/shm ./db_bench.new_skip -benchmarks=randomtransaction,stats -cache_index_and_filter_blocks=1 -bloom_bits=10 -partition_index_and_filters=1 -duration=30 -stats_dump_period_sec=12 -cache_size=100000000 -statistics -transaction_db 2>&1 | egrep 'db.db.write.micros|micros/op'
randomtransaction : 308.395 micros/op 3242 ops/sec; 0.1 MB/s ( transactions:97999 aborts:0)
rocksdb.db.write.micros P50 : 16.106222 P95 : 37.202403 P99 : 67.081875 P100 : 598091.000000 COUNT : 185714 SUM : 4098832
No substantial difference.
Reviewed By: siying
Differential Revision: D29738847
Pulled By: pdillinger
fbshipit-source-id: 1c5c155f5a1b62e4fea0fd4eeb515a8b7474027b
2021-07-16 21:12:06 +00:00
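// --- Illustrative sketch (hypothetical names) ---
// The detection trick described above, in isolation: re-acquiring a
// non-recursive mutex already held by the current thread is undefined
// behavior, so a brief lock/unlock round trip injected into the scan path
// lets TSAN (or a deadlock in practice) flag any scan that runs while the
// DB mutex is held. Assumes <mutex>.
namespace example {
inline void AssertNotHeldByThisThread(std::mutex& mu) {
  mu.lock();    // UB (caught by TSAN) if this thread already holds mu
  mu.unlock();  // otherwise a harmless round trip
}
}  // namespace example
// --- End sketch ---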
|
|
|
// For CacheEntryStatsCollector
|
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kMisc)] = 1;
|
|
|
|
// For Fill-it-up
|
2021-06-14 15:14:21 +00:00
|
|
|
expected[static_cast<size_t>(CacheEntryRole::kMisc)]++;
|
|
|
|
// Still able to hit on saved stats
|
|
|
|
EXPECT_EQ(prev_expected, GetCacheEntryRoleCountsBg());
|
|
|
|
// Enough to force a miss
|
|
|
|
env_->MockSleepForSeconds(1000);
|
|
|
|
EXPECT_EQ(expected, GetCacheEntryRoleCountsBg());
|
|
|
|
|
|
|
|
cache->Release(h);
|
2021-07-16 21:12:06 +00:00
|
|
|
|
|
|
|
// Now we test that the DB mutex is not held during scans, for the ways
|
|
|
|
// we know how to (possibly) trigger them. Without a better way to
|
|
|
|
// check this, we simply inject an acquire & release of the DB mutex
|
|
|
|
// deep in the stat collection code. If we were already holding the
|
|
|
|
// mutex, that is UB, which would at least be caught by TSAN.
|
|
|
|
int scan_count = 0;
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"CacheEntryStatsCollector::GetStats:AfterApplyToAllEntries",
|
|
|
|
[this, &scan_count](void*) {
|
|
|
|
dbfull()->TEST_LockMutex();
|
|
|
|
dbfull()->TEST_UnlockMutex();
|
|
|
|
++scan_count;
|
|
|
|
});
|
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
|
|
|
|
// Different things that might trigger a scan, with mock sleeps to
|
|
|
|
// force a miss.
|
|
|
|
env_->MockSleepForSeconds(10000);
|
|
|
|
dbfull()->DumpStats();
|
|
|
|
ASSERT_EQ(scan_count, 1);
|
|
|
|
|
|
|
|
env_->MockSleepForSeconds(10000);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
|
|
|
|
ASSERT_EQ(scan_count, 2);
|
|
|
|
|
|
|
|
env_->MockSleepForSeconds(10000);
|
|
|
|
std::string value_str;
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetProperty(DB::Properties::kBlockCacheEntryStats, &value_str));
|
|
|
|
ASSERT_EQ(scan_count, 3);
|
|
|
|
|
|
|
|
env_->MockSleepForSeconds(10000);
|
|
|
|
ASSERT_TRUE(db_->GetProperty(DB::Properties::kCFStats, &value_str));
|
|
|
|
// To match historical speed, querying this property no longer triggers
|
|
|
|
// a scan, even if results are old. But periodic dump stats should keep
|
|
|
|
// things reasonably updated.
|
|
|
|
ASSERT_EQ(scan_count, /*unchanged*/ 3);
|
|
|
|
|
|
|
|
SyncPoint::GetInstance()->DisableProcessing();
|
|
|
|
SyncPoint::GetInstance()->ClearAllCallBacks();
|
2021-05-19 23:45:51 +00:00
|
|
|
}
|
|
|
|
EXPECT_GE(iterations_tested, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-18 18:34:11 +00:00
|
|
|
#endif // ROCKSDB_LITE
|
2016-03-11 01:35:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
class DBBlockCacheKeyTest
|
|
|
|
: public DBTestBase,
|
|
|
|
public testing::WithParamInterface<std::tuple<bool, bool>> {
|
|
|
|
public:
|
|
|
|
DBBlockCacheKeyTest()
|
|
|
|
: DBTestBase("db_block_cache_test", /*env_do_fsync=*/false) {}
|
|
|
|
|
|
|
|
void SetUp() override {
|
|
|
|
use_compressed_cache_ = std::get<0>(GetParam());
|
|
|
|
exclude_file_numbers_ = std::get<1>(GetParam());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool use_compressed_cache_;
|
|
|
|
bool exclude_file_numbers_;
|
|
|
|
};
|
|
|
|
|
2021-08-17 03:36:19 +00:00
|
|
|
// Disable LinkFile so that we can physically copy a DB using Checkpoint.
|
|
|
|
// Disable file GetUniqueId to enable stable cache keys.
|
|
|
|
class StableCacheKeyTestFS : public FaultInjectionTestFS {
|
|
|
|
public:
|
|
|
|
explicit StableCacheKeyTestFS(const std::shared_ptr<FileSystem>& base)
|
|
|
|
: FaultInjectionTestFS(base) {
|
|
|
|
SetFailGetUniqueId(true);
|
|
|
|
}
|
|
|
|
|
2021-08-18 18:32:00 +00:00
|
|
|
virtual ~StableCacheKeyTestFS() override {}
|
2021-08-17 03:36:19 +00:00
|
|
|
|
|
|
|
IOStatus LinkFile(const std::string&, const std::string&, const IOOptions&,
|
|
|
|
IODebugContext*) override {
|
|
|
|
return IOStatus::NotSupported("Disabled");
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
|
2021-08-17 03:36:19 +00:00
|
|
|
std::shared_ptr<StableCacheKeyTestFS> test_fs{
|
|
|
|
new StableCacheKeyTestFS(env_->GetFileSystem())};
|
|
|
|
std::unique_ptr<CompositeEnvWrapper> test_env{
|
|
|
|
new CompositeEnvWrapper(env_, test_fs)};
|
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
|
|
|
options.env = test_env.get();
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
BlockBasedTableOptions table_options;
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
int key_count = 0;
|
|
|
|
uint64_t expected_stat = 0;
|
|
|
|
|
|
|
|
std::function<void()> verify_stats;
|
|
|
|
if (use_compressed_cache_) {
|
|
|
|
if (!Snappy_Supported()) {
|
|
|
|
ROCKSDB_GTEST_SKIP("Compressed cache test requires snappy support");
|
|
|
|
return;
|
2021-08-17 03:36:19 +00:00
|
|
|
}
|
2021-08-21 03:39:52 +00:00
|
|
|
options.compression = CompressionType::kSnappyCompression;
|
|
|
|
table_options.no_block_cache = true;
|
|
|
|
table_options.block_cache_compressed = NewLRUCache(1 << 25, 0, false);
|
|
|
|
verify_stats = [&options, &expected_stat] {
|
|
|
|
// One for ordinary SST file and one for external SST file
|
|
|
|
ASSERT_EQ(expected_stat,
|
|
|
|
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_ADD));
|
|
|
|
};
|
|
|
|
} else {
|
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
table_options.block_cache = NewLRUCache(1 << 25, 0, false);
|
|
|
|
verify_stats = [&options, &expected_stat] {
|
|
|
|
ASSERT_EQ(expected_stat,
|
|
|
|
options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
|
|
|
|
ASSERT_EQ(expected_stat,
|
|
|
|
options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
|
|
|
|
ASSERT_EQ(expected_stat,
|
|
|
|
options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
|
|
|
|
};
|
|
|
|
}
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
CreateAndReopenWithCF({"koko"}, options);
|
|
|
|
|
|
|
|
if (exclude_file_numbers_) {
|
|
|
|
// Simulate something like old behavior without file numbers in properties.
|
|
|
|
// This is a "control" side of the test that also ensures safely degraded
|
|
|
|
// behavior on old files.
|
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"PropertyBlockBuilder::AddTableProperty:Start", [&](void* arg) {
|
|
|
|
TableProperties* props = reinterpret_cast<TableProperties*>(arg);
|
|
|
|
props->orig_file_number = 0;
|
|
|
|
});
|
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
}
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
std::function<void()> perform_gets = [&key_count, &expected_stat, this]() {
|
|
|
|
if (exclude_file_numbers_) {
|
|
|
|
// No cache key reuse should happen, because we can't rely on the current
|
|
|
|
// file number being stable
|
|
|
|
expected_stat += key_count;
|
|
|
|
} else {
|
|
|
|
// Cache keys should be stable
|
|
|
|
expected_stat = key_count;
|
|
|
|
}
|
|
|
|
for (int i = 0; i < key_count; ++i) {
|
|
|
|
ASSERT_EQ(Get(1, Key(i)), "abc");
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Ordinary SST files with same session id
|
|
|
|
const std::string something_compressible(500U, 'x');
|
|
|
|
for (int i = 0; i < 2; ++i) {
|
|
|
|
ASSERT_OK(Put(1, Key(key_count), "abc"));
|
|
|
|
ASSERT_OK(Put(1, Key(key_count) + "a", something_compressible));
|
|
|
|
ASSERT_OK(Flush(1));
|
|
|
|
++key_count;
|
|
|
|
}
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-18 18:32:00 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
2021-08-21 03:39:52 +00:00
|
|
|
// Save an export of those ordinary SST files for later
|
|
|
|
std::string export_files_dir = dbname_ + "/exported";
|
|
|
|
ExportImportFilesMetaData* metadata_ptr_ = nullptr;
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->ExportColumnFamily(handles_[1], export_files_dir,
|
|
|
|
&metadata_ptr_));
|
|
|
|
ASSERT_NE(metadata_ptr_, nullptr);
|
|
|
|
delete checkpoint;
|
|
|
|
checkpoint = nullptr;
|
|
|
|
|
|
|
|
// External SST files with same session id
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::vector<std::string> external;
|
|
|
|
for (int i = 0; i < 2; ++i) {
|
|
|
|
std::string f = dbname_ + "/external" + ToString(i) + ".sst";
|
|
|
|
external.push_back(f);
|
|
|
|
ASSERT_OK(sst_file_writer.Open(f));
|
|
|
|
ASSERT_OK(sst_file_writer.Put(Key(key_count), "abc"));
|
|
|
|
ASSERT_OK(
|
|
|
|
sst_file_writer.Put(Key(key_count) + "a", something_compressible));
|
|
|
|
++key_count;
|
|
|
|
ExternalSstFileInfo external_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&external_info));
|
|
|
|
IngestExternalFileOptions ingest_opts;
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(handles_[1], {f}, ingest_opts));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (exclude_file_numbers_) {
|
|
|
|
// FIXME(peterd): figure out where these extra two ADDs are coming from
|
|
|
|
options.statistics->recordTick(BLOCK_CACHE_INDEX_ADD,
|
|
|
|
uint64_t{0} - uint64_t{2});
|
|
|
|
options.statistics->recordTick(BLOCK_CACHE_FILTER_ADD,
|
|
|
|
uint64_t{0} - uint64_t{2});
|
|
|
|
options.statistics->recordTick(BLOCK_CACHE_COMPRESSED_ADD,
|
|
|
|
uint64_t{0} - uint64_t{2});
|
|
|
|
}
|
2021-08-18 18:32:00 +00:00
|
|
|
#endif
|
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
perform_gets();
|
|
|
|
verify_stats();
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
// Make sure we can cache hit after re-open
|
|
|
|
ReopenWithColumnFamilies({"default", "koko"}, options);
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
perform_gets();
|
|
|
|
verify_stats();
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
// Make sure we can cache hit even on a full copy of the DB. Using
|
|
|
|
// StableCacheKeyTestFS, Checkpoint will resort to a full copy, not a hard link.
|
|
|
|
// (Checkpoint not available in LITE mode to test this.)
|
2021-08-17 03:36:19 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
2021-08-21 03:39:52 +00:00
|
|
|
auto db_copy_name = dbname_ + "-copy";
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(db_copy_name));
|
|
|
|
delete checkpoint;
|
2021-08-18 18:32:00 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
Close();
|
|
|
|
Destroy(options);
|
2021-08-18 18:32:00 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
// Switch to the DB copy
|
|
|
|
SaveAndRestore<std::string> save_dbname(&dbname_, db_copy_name);
|
|
|
|
ReopenWithColumnFamilies({"default", "koko"}, options);
|
|
|
|
|
|
|
|
perform_gets();
|
|
|
|
verify_stats();
|
2021-08-17 03:36:19 +00:00
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
// And ensure that re-importing + ingesting the same files into a
|
|
|
|
// different DB uses same cache keys
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
ColumnFamilyHandle* cfh = nullptr;
|
|
|
|
ASSERT_OK(db_->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo",
|
|
|
|
ImportColumnFamilyOptions(),
|
|
|
|
*metadata_ptr_, &cfh));
|
|
|
|
ASSERT_NE(cfh, nullptr);
|
|
|
|
delete cfh;
|
|
|
|
cfh = nullptr;
|
|
|
|
delete metadata_ptr_;
|
|
|
|
metadata_ptr_ = nullptr;
|
|
|
|
|
|
|
|
DestroyDB(export_files_dir, options);
|
|
|
|
|
|
|
|
ReopenWithColumnFamilies({"default", "yoyo"}, options);
|
|
|
|
|
|
|
|
IngestExternalFileOptions ingest_opts;
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(handles_[1], {external}, ingest_opts));
|
|
|
|
|
|
|
|
if (exclude_file_numbers_) {
|
|
|
|
// FIXME(peterd): figure out where these extra two ADDs are coming from
|
|
|
|
options.statistics->recordTick(BLOCK_CACHE_INDEX_ADD,
|
|
|
|
uint64_t{0} - uint64_t{2});
|
|
|
|
options.statistics->recordTick(BLOCK_CACHE_FILTER_ADD,
|
|
|
|
uint64_t{0} - uint64_t{2});
|
2021-08-17 03:36:19 +00:00
|
|
|
}
|
2021-08-21 03:39:52 +00:00
|
|
|
|
|
|
|
perform_gets();
|
|
|
|
verify_stats();
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
|
|
|
|
Close();
|
|
|
|
Destroy(options);
|
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2021-08-17 03:36:19 +00:00
|
|
|
}
|
|
|
|
|
2021-08-21 03:39:52 +00:00
|
|
|
INSTANTIATE_TEST_CASE_P(DBBlockCacheKeyTest, DBBlockCacheKeyTest,
|
|
|
|
::testing::Combine(::testing::Bool(),
|
|
|
|
::testing::Bool()));
|
|
|
|
|
2020-10-11 21:52:49 +00:00
|
|
|
class DBBlockCachePinningTest
|
|
|
|
: public DBTestBase,
|
|
|
|
public testing::WithParamInterface<
|
|
|
|
std::tuple<bool, PinningTier, PinningTier, PinningTier>> {
|
|
|
|
public:
|
|
|
|
DBBlockCachePinningTest()
|
2021-07-23 15:37:27 +00:00
|
|
|
: DBTestBase("db_block_cache_test", /*env_do_fsync=*/false) {}
|
2020-10-11 21:52:49 +00:00
|
|
|
|
|
|
|
void SetUp() override {
|
|
|
|
partition_index_and_filters_ = std::get<0>(GetParam());
|
|
|
|
top_level_index_pinning_ = std::get<1>(GetParam());
|
|
|
|
partition_pinning_ = std::get<2>(GetParam());
|
|
|
|
unpartitioned_pinning_ = std::get<3>(GetParam());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool partition_index_and_filters_;
|
|
|
|
PinningTier top_level_index_pinning_;
|
|
|
|
PinningTier partition_pinning_;
|
|
|
|
PinningTier unpartitioned_pinning_;
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST_P(DBBlockCachePinningTest, TwoLevelDB) {
|
|
|
|
// Creates one file in L0 and one file in L1. Both files have enough data that
|
|
|
|
// their index and filter blocks are partitioned. The L1 file will also have
|
|
|
|
// a compression dictionary (those are trained only during compaction), which
|
|
|
|
// must be unpartitioned.
|
|
|
|
const int kKeySize = 32;
|
|
|
|
const int kBlockSize = 128;
|
|
|
|
const int kNumBlocksPerFile = 128;
|
|
|
|
const int kNumKeysPerFile = kBlockSize * kNumBlocksPerFile / kKeySize;
|
|
|
|
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
// `kNoCompression` makes the unit test more portable. But it relies on the
|
|
|
|
// current behavior of persisting/accessing dictionary even when there's no
|
|
|
|
// (de)compression happening, which seems fairly likely to change over time.
|
|
|
|
options.compression = kNoCompression;
|
|
|
|
options.compression_opts.max_dict_bytes = 4 << 10;
|
|
|
|
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_cache = NewLRUCache(1 << 20 /* capacity */);
|
|
|
|
table_options.block_size = kBlockSize;
|
|
|
|
table_options.metadata_block_size = kBlockSize;
|
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
table_options.metadata_cache_options.top_level_index_pinning =
|
|
|
|
top_level_index_pinning_;
|
|
|
|
table_options.metadata_cache_options.partition_pinning = partition_pinning_;
|
|
|
|
table_options.metadata_cache_options.unpartitioned_pinning =
|
|
|
|
unpartitioned_pinning_;
|
|
|
|
table_options.filter_policy.reset(
|
|
|
|
NewBloomFilterPolicy(10 /* bits_per_key */));
|
|
|
|
if (partition_index_and_filters_) {
|
|
|
|
table_options.index_type =
|
|
|
|
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
|
|
|
|
table_options.partition_filters = true;
|
|
|
|
}
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
Reopen(options);
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
for (int i = 0; i < 2; ++i) {
|
|
|
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
|
|
|
ASSERT_OK(Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kKeySize)));
|
|
|
|
}
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
if (i == 0) {
|
|
|
|
// Prevent trivial move so file will be rewritten with dictionary and
|
|
|
|
// reopened with L1's pinning settings.
|
|
|
|
CompactRangeOptions cro;
|
|
|
|
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
|
|
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Clear all unpinned blocks so unpinned blocks will show up as cache misses
|
|
|
|
// when reading a key from a file.
|
|
|
|
table_options.block_cache->EraseUnRefEntries();
|
|
|
|
|
|
|
|
// Get base cache values
|
|
|
|
uint64_t filter_misses = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
|
|
|
|
uint64_t index_misses = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
|
|
|
|
uint64_t compression_dict_misses =
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
|
|
|
|
|
|
|
|
// Read a key from the L0 file
|
|
|
|
Get(Key(kNumKeysPerFile));
|
|
|
|
uint64_t expected_filter_misses = filter_misses;
|
|
|
|
uint64_t expected_index_misses = index_misses;
|
|
|
|
uint64_t expected_compression_dict_misses = compression_dict_misses;
|
|
|
|
if (partition_index_and_filters_) {
|
|
|
|
if (top_level_index_pinning_ == PinningTier::kNone) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
if (partition_pinning_ == PinningTier::kNone) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (unpartitioned_pinning_ == PinningTier::kNone) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
}
|
2020-11-03 03:20:15 +00:00
|
|
|
if (unpartitioned_pinning_ == PinningTier::kNone) {
|
|
|
|
++expected_compression_dict_misses;
|
|
|
|
}
|
2020-10-11 21:52:49 +00:00
|
|
|
ASSERT_EQ(expected_filter_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(expected_index_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
|
|
|
|
ASSERT_EQ(expected_compression_dict_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
|
|
|
|
|
|
|
|
// Clear all unpinned blocks so unpinned blocks will show up as cache misses
|
|
|
|
// when reading a key from a file.
|
|
|
|
table_options.block_cache->EraseUnRefEntries();
|
|
|
|
|
|
|
|
// Read a key from the L1 file
|
|
|
|
Get(Key(0));
|
|
|
|
if (partition_index_and_filters_) {
|
|
|
|
if (top_level_index_pinning_ == PinningTier::kNone ||
|
|
|
|
top_level_index_pinning_ == PinningTier::kFlushedAndSimilar) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
if (partition_pinning_ == PinningTier::kNone ||
|
|
|
|
partition_pinning_ == PinningTier::kFlushedAndSimilar) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (unpartitioned_pinning_ == PinningTier::kNone ||
|
|
|
|
unpartitioned_pinning_ == PinningTier::kFlushedAndSimilar) {
|
|
|
|
++expected_filter_misses;
|
|
|
|
++expected_index_misses;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (unpartitioned_pinning_ == PinningTier::kNone ||
|
|
|
|
unpartitioned_pinning_ == PinningTier::kFlushedAndSimilar) {
|
|
|
|
++expected_compression_dict_misses;
|
|
|
|
}
|
|
|
|
ASSERT_EQ(expected_filter_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(expected_index_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
|
|
|
|
ASSERT_EQ(expected_compression_dict_misses,
|
|
|
|
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
|
|
|
|
}
|
|
|
|
|
|
|
|
INSTANTIATE_TEST_CASE_P(
|
|
|
|
DBBlockCachePinningTest, DBBlockCachePinningTest,
|
|
|
|
::testing::Combine(
|
|
|
|
::testing::Bool(),
|
|
|
|
::testing::Values(PinningTier::kNone, PinningTier::kFlushedAndSimilar,
|
|
|
|
PinningTier::kAll),
|
|
|
|
::testing::Values(PinningTier::kNone, PinningTier::kFlushedAndSimilar,
|
|
|
|
PinningTier::kAll),
|
|
|
|
::testing::Values(PinningTier::kNone, PinningTier::kFlushedAndSimilar,
|
|
|
|
PinningTier::kAll)));
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2016-03-11 01:35:19 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2016-03-11 01:35:19 +00:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|