mirror of
https://github.com/facebook/rocksdb.git
synced 2024-12-02 20:52:55 +00:00
57997ddaaf
Summary: This PR implements a coroutine version of batched MultiGet in order to concurrently read from multiple SST files in a level using async IO, thus reducing MultiGet latency. From the user's perspective, the API is still synchronous and single threaded, with the RocksDB part of the processing happening in the context of the caller's thread. The decision whether to call the synchronous or the coroutine code is made in Version::MultiGet.

A good way to review this PR is to review the first 4 commits in order - de773b3, 70c2f70, 10b50e1, and 377a597 - before reviewing the rest.

TODO:
1. Figure out how to build it in CircleCI (requires some dependencies to be installed)
2. Do some stress testing with coroutines enabled

No regression in synchronous MultiGet between this branch and main -
```./db_bench -use_existing_db=true --db=/data/mysql/rocksdb/prefix_scan -benchmarks="readseq,multireadrandom" -key_size=32 -value_size=512 -num=5000000 -batch_size=64 -multiread_batched=true -use_direct_reads=false -duration=60 -ops_between_duration_checks=1 -readonly=true -adaptive_readahead=true -threads=16 -cache_size=10485760000 -async_io=false -multiread_stride=40000 -statistics```
Branch - ```multireadrandom : 4.025 micros/op 3975111 ops/sec 60.001 seconds 238509056 operations; 2062.3 MB/s (14767808 of 14767808 found)```
Main - ```multireadrandom : 3.987 micros/op 4013216 ops/sec 60.001 seconds 240795392 operations; 2082.1 MB/s (15231040 of 15231040 found)```

More benchmarks in various scenarios are given below. The measurements were taken with ```async_io=false``` (no coroutines) and ```async_io=true``` (use coroutines). For an IO bound workload (with every key requiring an IO), the coroutines version shows a clear benefit, being ~2.6X faster. For CPU bound workloads, the coroutines version has ~6-15% higher CPU utilization, depending on how many keys overlap an SST file.

1. Single thread IO bound workload on remote storage with sparse MultiGet batch keys (~1 key overlap/file) -
No coroutines - ```multireadrandom : 831.774 micros/op 1202 ops/sec 60.001 seconds 72136 operations; 0.6 MB/s (72136 of 72136 found)```
Using coroutines - ```multireadrandom : 318.742 micros/op 3137 ops/sec 60.003 seconds 188248 operations; 1.6 MB/s (188248 of 188248 found)```

2. Single thread CPU bound workload (all data cached) with ~1 key overlap/file -
No coroutines - ```multireadrandom : 4.127 micros/op 242322 ops/sec 60.000 seconds 14539384 operations; 125.7 MB/s (14539384 of 14539384 found)```
Using coroutines - ```multireadrandom : 4.741 micros/op 210935 ops/sec 60.000 seconds 12656176 operations; 109.4 MB/s (12656176 of 12656176 found)```

3. Single thread CPU bound workload with ~2 key overlap/file -
No coroutines - ```multireadrandom : 3.717 micros/op 269000 ops/sec 60.000 seconds 16140024 operations; 139.6 MB/s (16140024 of 16140024 found)```
Using coroutines - ```multireadrandom : 4.146 micros/op 241204 ops/sec 60.000 seconds 14472296 operations; 125.1 MB/s (14472296 of 14472296 found)```

4. CPU bound multi-threaded (16 threads) with ~4 key overlap/file -
No coroutines - ```multireadrandom : 4.534 micros/op 3528792 ops/sec 60.000 seconds 211728728 operations; 1830.7 MB/s (12737024 of 12737024 found)```
Using coroutines - ```multireadrandom : 4.872 micros/op 3283812 ops/sec 60.000 seconds 197030096 operations; 1703.6 MB/s (12548032 of 12548032 found)```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9968

Reviewed By: akankshamahajan15

Differential Revision: D36348563

Pulled By: anand1976

fbshipit-source-id: c0ce85a505fd26ebfbb09786cbd7f25202038696
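As a usage illustration (not part of this diff): a minimal sketch of driving the batched MultiGet API with ```async_io``` enabled, which is the path this PR accelerates. It assumes a RocksDB build where ReadOptions::async_io is available and coroutine support (USE_COROUTINES) plus an async-capable filesystem are compiled in; the function name and keys are hypothetical.
```
#include <cassert>
#include <vector>
#include "rocksdb/db.h"

void BatchedMultiGetExample(rocksdb::DB* db) {
  rocksdb::ReadOptions ro;
  ro.async_io = true;  // opt in to concurrent reads across SST files

  std::vector<rocksdb::Slice> keys{"k1", "k2", "k3"};
  std::vector<rocksdb::PinnableSlice> values(keys.size());
  std::vector<rocksdb::Status> statuses(keys.size());

  // Batched API: one call looks up all keys in the default column family.
  // The call stays synchronous; any coroutine IO happens internally.
  db->MultiGet(ro, db->DefaultColumnFamily(), keys.size(), keys.data(),
               values.data(), statuses.data(), /*sorted_input=*/false);

  for (size_t i = 0; i < keys.size(); ++i) {
    assert(statuses[i].ok() || statuses[i].IsNotFound());
  }
}
```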
161 lines
6.8 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <memory>

#include "db/range_tombstone_fragmenter.h"
#if USE_COROUTINES
#include "folly/experimental/coro/Coroutine.h"
#include "folly/experimental/coro/Task.h"
#endif
#include "rocksdb/slice_transform.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/multiget_context.h"
#include "table/table_reader_caller.h"

namespace ROCKSDB_NAMESPACE {

class Iterator;
struct ParsedInternalKey;
class Slice;
class Arena;
struct ReadOptions;
struct TableProperties;
class GetContext;
class MultiGetContext;

// A Table (also referred to as SST) is a sorted map from strings to strings.
// Tables are immutable and persistent. A Table may be safely accessed from
// multiple threads without external synchronization. Table readers are used
// for reading the various table formats supported by RocksDB, including the
// BlockBasedTable, PlainTable and CuckooTable formats.
class TableReader {
 public:
  virtual ~TableReader() {}

  // Returns a new iterator over the table contents.
  // The result of NewIterator() is initially invalid (the caller must
  // call one of the Seek methods on the iterator before using it).
  //
  // read_options: Must outlive the returned iterator.
  // arena: If not null, the arena must be used to allocate the iterator.
  //        When destroying the iterator, the caller will not call "delete"
  //        but Iterator::~Iterator() directly. The destructor needs to
  //        destroy all the state except what was allocated in the arena.
  // skip_filters: disables checking the bloom filters even if they exist.
  //        This option is effective only for the block-based table format.
  // compaction_readahead_size: its value will only be used if caller ==
  //        kCompaction
  virtual InternalIterator* NewIterator(
      const ReadOptions& read_options, const SliceTransform* prefix_extractor,
      Arena* arena, bool skip_filters, TableReaderCaller caller,
      size_t compaction_readahead_size = 0,
      bool allow_unprepared_value = false) = 0;
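
  // Illustrative sketch of the arena contract described above, assuming a
  // hypothetical iterator type MyTableIterator (not a type in this file):
  //
  //   char* buf = arena->AllocateAligned(sizeof(MyTableIterator));
  //   InternalIterator* it = new (buf) MyTableIterator(/*...*/);
  //   // ... use it ...
  //   it->~InternalIterator();  // destroy in place; no delete, since the
  //                             // memory belongs to the arena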

  virtual FragmentedRangeTombstoneIterator* NewRangeTombstoneIterator(
      const ReadOptions& /*read_options*/) {
    return nullptr;
  }

  // Given a key, return an approximate byte offset in the file where
  // the data for that key begins (or would begin if the key were
  // present in the file). The returned value is in terms of file
  // bytes, and so includes effects like compression of the underlying data.
  // E.g., the approximate offset of the last key in the table will
  // be close to the file length.
  // TODO(peterd): Since this function is only used for approximate size
  // from beginning of file, reduce code duplication by removing this
  // function and letting ApproximateSize take optional start and end, so
  // that absolute start and end can be specified and optimized without
  // key / index work.
  virtual uint64_t ApproximateOffsetOf(const Slice& key,
                                       TableReaderCaller caller) = 0;

  // Given start and end keys, return the approximate data size in the file
  // between the keys. The returned value is in terms of file bytes, and so
  // includes effects like compression of the underlying data and applicable
  // portions of metadata including filters and indexes. Nullptr for start or
  // end (or both) indicates absolute start or end of the table.
  virtual uint64_t ApproximateSize(const Slice& start, const Slice& end,
                                   TableReaderCaller caller) = 0;
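
  // For example (a sketch; at this layer the keys are internal keys, and the
  // caller tag is chosen by the call site):
  //
  //   uint64_t bytes = reader->ApproximateSize(
  //       start_ikey, end_ikey, TableReaderCaller::kUserApproximateSize);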

  // Set up the table for Compaction. Might change some parameters with
  // posix_fadvise
  virtual void SetupForCompaction() = 0;

  virtual std::shared_ptr<const TableProperties> GetTableProperties() const = 0;

  // Prepare work that can be done before the real Get()
  virtual void Prepare(const Slice& /*target*/) {}

  // Report an approximation of how much memory has been used.
  virtual size_t ApproximateMemoryUsage() const = 0;

  // Calls get_context->SaveValue() repeatedly, starting with
  // the entry found after a call to Seek(key), until it returns false.
  // May not make such a call if the filter policy says that the key is
  // not present.
  //
  // get_context->MarkKeyMayExist needs to be called when it is configured
  // to be memory only and the key is not found in the block cache.
  //
  // readOptions: the options for this read
  // key: the key to search for
  // skip_filters: disables checking the bloom filters even if they exist.
  //        This option is effective only for the block-based table format.
  virtual Status Get(const ReadOptions& readOptions, const Slice& key,
                     GetContext* get_context,
                     const SliceTransform* prefix_extractor,
                     bool skip_filters = false) = 0;
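
  // Illustrative control flow of the contract above (a sketch, not the
  // actual implementation; SaveValue's full signature takes more arguments):
  //
  //   iter->Seek(key);
  //   while (iter->Valid() &&
  //          get_context->SaveValue(parsed_key, iter->value())) {
  //     iter->Next();
  //   }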

  // The default implementation of batched MultiGet processes the keys
  // serially by calling Get() once per key. Subclasses can override it to
  // perform the lookups in a more efficient manner.
  virtual void MultiGet(const ReadOptions& readOptions,
                        const MultiGetContext::Range* mget_range,
                        const SliceTransform* prefix_extractor,
                        bool skip_filters = false) {
    for (auto iter = mget_range->begin(); iter != mget_range->end(); ++iter) {
      *iter->s = Get(readOptions, iter->ikey, iter->get_context,
                     prefix_extractor, skip_filters);
    }
  }

#if USE_COROUTINES
  // Coroutine flavor of MultiGet. The default implementation simply calls
  // the synchronous MultiGet() and completes immediately; subclasses can
  // override it to suspend on async IO.
  virtual folly::coro::Task<void> MultiGetCoroutine(
      const ReadOptions& readOptions, const MultiGetContext::Range* mget_range,
      const SliceTransform* prefix_extractor, bool skip_filters = false) {
    MultiGet(readOptions, mget_range, prefix_extractor, skip_filters);
    co_return;
  }
#endif  // USE_COROUTINES
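
  // A sketch of how a caller (e.g. Version::MultiGet in this PR) can overlap
  // reads across several table readers by gathering coroutine tasks and
  // awaiting them together; `readers`, `ro` and `range` are hypothetical
  // locals at the call site:
  //
  //   std::vector<folly::coro::Task<void>> tasks;
  //   for (TableReader* t : readers) {
  //     tasks.push_back(t->MultiGetCoroutine(ro, &range, prefix_extractor));
  //   }
  //   co_await folly::coro::collectAllRange(std::move(tasks));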

  // Prefetch data corresponding to a given range of keys.
  // Typically this functionality is required for table implementations that
  // persist the data on a non-volatile storage medium like disk/SSD.
  virtual Status Prefetch(const Slice* begin = nullptr,
                          const Slice* end = nullptr) {
    (void)begin;
    (void)end;
    // Default implementation is a no-op.
    // Child classes should implement this functionality when applicable.
    return Status::OK();
  }

  // Convert the db file to a human-readable form.
  virtual Status DumpTable(WritableFile* /*out_file*/) {
    return Status::NotSupported("DumpTable() not supported");
  }

  // Check whether there is corruption in this db file.
  virtual Status VerifyChecksum(const ReadOptions& /*read_options*/,
                                TableReaderCaller /*caller*/) {
    return Status::NotSupported("VerifyChecksum() not supported");
  }
};

}  // namespace ROCKSDB_NAMESPACE