// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/reader_common.h"
namespace ROCKSDB_NAMESPACE {

// Encapsulates common functionality for the various index reader
// implementations. Provides access to the index block regardless of whether
// it is owned by the reader or stored in the cache, or whether it is pinned
// in the cache or not.
class BlockBasedTable::IndexReaderCommon : public BlockBasedTable::IndexReader {
 public:
  // `t` must be non-null and must outlive this reader. `index_block` may be
  // empty here; otherwise it either owns the index block or references a
  // (possibly pinned) block cache entry -- see CachableEntry.
  IndexReaderCommon(const BlockBasedTable* t,
                    CachableEntry<Block>&& index_block)
      : table_(t), index_block_(std::move(index_block)) {
    assert(table_ != nullptr);
  }

  // Best-effort, speculative erasure of this reader's index block cache
  // entries before the table reader is destroyed (e.g. when the file becomes
  // obsolete). `uncache_aggressiveness` bounds how much CPU is spent on the
  // attempt; 0 means don't try. EXPERIMENTAL -- see the IndexReader
  // interface and the `uncache_aggressiveness` CF option.
  void EraseFromCacheBeforeDestruction(
      uint32_t /*uncache_aggressiveness*/) override;

 protected:
  // Reads the index block of `table` into `*index_block`, going through the
  // block cache when `use_cache` is true (cache lookups/charging are
  // reported via `get_context` / `lookup_context` when provided).
  static Status ReadIndexBlock(const BlockBasedTable* table,
                               FilePrefetchBuffer* prefetch_buffer,
                               const ReadOptions& read_options, bool use_cache,
                               GetContext* get_context,
                               BlockCacheLookupContext* lookup_context,
                               CachableEntry<Block>* index_block);

  // Non-owning pointer to the table this index reader belongs to.
  const BlockBasedTable* table() const { return table_; }

  const InternalKeyComparator* internal_comparator() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);

    return &table_->get_rep()->internal_comparator;
  }

  // Table format flag: whether index entries carry each data block's first
  // key (from the table's Rep).
  bool index_has_first_key() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_has_first_key;
  }

  // Table format flag: whether index keys include the internal key footer
  // (sequence number/type) or are user keys only (from the table's Rep).
  bool index_key_includes_seq() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_key_includes_seq;
  }

  // Table format flag: whether index values are stored in full, unshortened
  // form (from the table's Rep).
  bool index_value_is_full() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_value_is_full;
  }

  // Whether table options ask for index/filter blocks to live in the block
  // cache rather than being held by the reader.
  bool cache_index_blocks() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->table_options.cache_index_and_filter_blocks;
  }

  // Whether user-defined timestamps are physically present in the keys
  // stored in this table; when false, timestamps were stripped at write
  // time and must be padded back on read.
  bool user_defined_timestamps_persisted() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->user_defined_timestamps_persisted;
  }

  // Returns the index block in `*index_block`, using the copy already held
  // by this reader when available, otherwise fetching it (via the cache /
  // file as appropriate).
  Status GetOrReadIndexBlock(GetContext* get_context,
                             BlockCacheLookupContext* lookup_context,
                             CachableEntry<Block>* index_block,
                             const ReadOptions& read_options) const;

  // Counts the index block's memory only when this reader owns it;
  // cache-resident blocks are charged to the block cache instead.
  size_t ApproximateIndexBlockMemoryUsage() const {
    assert(!index_block_.GetOwnValue() || index_block_.GetValue() != nullptr);
    return index_block_.GetOwnValue()
               ? index_block_.GetValue()->ApproximateMemoryUsage()
               : 0;
  }

 private:
  const BlockBasedTable* table_;  // not owned; never null after construction
  CachableEntry<Block> index_block_;
};

}  // namespace ROCKSDB_NAMESPACE