// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found
// in the LICENSE file. See the AUTHORS file for names of contributors.
//
// InlineSkipList is derived from SkipList (skiplist.h), but it optimizes
// the memory layout by requiring that the key storage be allocated through
// the skip list instance. For the common case of SkipList<const char*,
// Cmp> this saves 1 pointer per skip list node and gives better cache
// locality, at the expense of wasted padding from using AllocateAligned
// instead of Allocate for the keys. The unused padding will be from
// 0 to sizeof(void*)-1 bytes, and the space savings are sizeof(void*)
// bytes, so despite the padding the space used is always less than
// SkipList<const char*, ..>.
//
// Thread safety -------------
//
// Writes via Insert require external synchronization, most likely a mutex.
// InsertConcurrently can be safely called concurrently with reads and
// with other concurrent inserts. Reads require a guarantee that the
// InlineSkipList will not be destroyed while the read is in progress.
// Apart from that, reads progress without any internal locking or
// synchronization.
//
// Invariants:
//
// (1) Allocated nodes are never deleted until the InlineSkipList is
// destroyed. This is trivially guaranteed by the code since we never
// delete any skip list nodes.
//
// (2) The contents of a Node except for the next/prev pointers are
// immutable after the Node has been linked into the InlineSkipList.
// Only Insert() modifies the list, and it is careful to initialize a
// node and use release-stores to publish the nodes in one or more lists.
//
// ... prev vs. next pointer ordering ...
//
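// Usage sketch (illustrative only; names such as cmp, arena, EncodeKey,
// user_key, seq and encoded_len are placeholders, not part of this header):
// a key is allocated through the list so that it lives inline with its node,
// filled in by the caller, and then published with Insert.
//
//   InlineSkipList<const MemTableRep::KeyComparator&> list(cmp, &arena);
//   char* buf = list.AllocateKey(encoded_len);
//   EncodeKey(buf, user_key, seq);    // caller-defined key encoding
//   list.Insert(buf);                 // external synchronization required
//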
#pragma once
#include <assert.h>
#include <stdlib.h>

#include <algorithm>
#include <atomic>
#include <type_traits>

#include "memory/allocator.h"
#include "port/likely.h"
#include "port/port.h"
#include "rocksdb/slice.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

template <class Comparator>
class InlineSkipList {
 private:
  struct Node;
  struct Splice;

 public:
  using DecodedKey =
      typename std::remove_reference<Comparator>::type::DecodedType;

  static const uint16_t kMaxPossibleHeight = 32;

  // Create a new InlineSkipList object that will use "cmp" for comparing
  // keys, and will allocate memory using "*allocator". Objects allocated
  // in the allocator must remain allocated for the lifetime of the
  // skiplist object.
  explicit InlineSkipList(Comparator cmp, Allocator* allocator,
                          int32_t max_height = 12,
                          int32_t branching_factor = 4);
  // No copying allowed
  InlineSkipList(const InlineSkipList&) = delete;
  InlineSkipList& operator=(const InlineSkipList&) = delete;

  // Allocates a key and a skip-list node, returning a pointer to the key
  // portion of the node. This method is thread-safe if the allocator
  // is thread-safe.
  char* AllocateKey(size_t key_size);

  // Allocate a splice using allocator.
  Splice* AllocateSplice();

  // Allocate a splice on heap.
  Splice* AllocateSpliceOnHeap();

  // Inserts a key allocated by AllocateKey, after the actual key value
  // has been filled in.
  //
  // REQUIRES: nothing that compares equal to key is currently in the list.
  // REQUIRES: no concurrent calls to any insert methods.
  bool Insert(const char* key);

  // Inserts a key allocated by AllocateKey with a hint of last insert
  // position in the skip-list. If hint points to nullptr, a new hint will be
  // populated, which can be used in subsequent calls.
  //
  // It can be used to optimize the workload where there are multiple groups
  // of keys, and each key is likely to insert to a location close to the last
  // inserted key in the same group. One example is sequential inserts.
  //
  // REQUIRES: nothing that compares equal to key is currently in the list.
  // REQUIRES: no concurrent calls to any insert methods.
  bool InsertWithHint(const char* key, void** hint);

  // Like InsertConcurrently, but with a hint.
  //
  // REQUIRES: nothing that compares equal to key is currently in the list.
  // REQUIRES: no concurrent calls that use the same hint.
  bool InsertWithHintConcurrently(const char* key, void** hint);

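  // Illustrative sketch of the hint-based pattern above (not part of this
  // header's contract; `keys` is a placeholder container of pre-encoded,
  // roughly ascending keys): the caller keeps one void* hint per key group
  // and passes its address on every insert so the cached position is reused.
  //
  //   void* hint = nullptr;
  //   for (const Slice& k : keys) {
  //     char* buf = list.AllocateKey(k.size());
  //     memcpy(buf, k.data(), k.size());
  //     list.InsertWithHint(buf, &hint);   // hint created/updated in place
  //   }
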
  // Like Insert, but external synchronization is not required.
  bool InsertConcurrently(const char* key);

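  // Illustrative sketch only: with a thread-safe allocator (for example a
  // concurrent arena), several writer threads may call InsertConcurrently
  // without an external mutex, provided no two threads insert keys that
  // compare equal. `MakeUniqueKey` and `n` are placeholders.
  //
  //   auto writer = [&](int tid) {
  //     for (int i = 0; i < n; ++i) {
  //       std::string k = MakeUniqueKey(tid, i);
  //       char* buf = list.AllocateKey(k.size());
  //       memcpy(buf, k.data(), k.size());
  //       list.InsertConcurrently(buf);
  //     }
  //   };
  //   std::thread t1(writer, 1), t2(writer, 2);
  //   t1.join();
  //   t2.join();
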
  // Inserts a node into the skip list. key must have been allocated by
  // AllocateKey and then filled in by the caller. If UseCAS is true,
  // then external synchronization is not required, otherwise this method
  // may not be called concurrently with any other insertions.
  //
  // Regardless of whether UseCAS is true, the splice must be owned
  // exclusively by the current thread. If allow_partial_splice_fix is
  // true, then the cost of insertion is amortized O(log D), where D is
  // the distance from the splice to the inserted key (measured as the
  // number of intervening nodes). Note that this bound is very good for
  // sequential insertions! If allow_partial_splice_fix is false then
  // the existing splice will be ignored unless the current key is being
  // inserted immediately after the splice. allow_partial_splice_fix ==
  // false has worse running time for the non-sequential case O(log N),
  // but a better constant factor.
  template <bool UseCAS>
  bool Insert(const char* key, Splice* splice, bool allow_partial_splice_fix);

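  // Sketch of driving the splice-based overload directly (illustrative;
  // most callers go through Insert/InsertWithHint, which manage the splice
  // internally; `sorted_keys` is a placeholder container):
  //
  //   Splice* splice = list.AllocateSplice();
  //   for (const Slice& k : sorted_keys) {
  //     char* buf = list.AllocateKey(k.size());
  //     memcpy(buf, k.data(), k.size());
  //     list.Insert<false>(buf, splice, true /* allow_partial_splice_fix */);
  //   }
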
  // Returns true iff an entry that compares equal to key is in the list.
  bool Contains(const char* key) const;

  // Return estimated number of entries from `start_ikey` to `end_ikey`.
  uint64_t ApproximateNumEntries(const Slice& start_ikey,
                                 const Slice& end_ikey) const;

  // Validate correctness of the skip-list.
  void TEST_Validate() const;

  // Iteration over the contents of a skip list
  class Iterator {
   public:
    // Initialize an iterator over the specified list.
    // The returned iterator is not valid.
    explicit Iterator(const InlineSkipList* list);

    // Change the underlying skiplist used for this iterator.
    // This enables reusing the iterator for a different skiplist without
    // deallocating the old iterator and then allocating a new one.
    void SetList(const InlineSkipList* list);

    // Returns true iff the iterator is positioned at a valid node.
    bool Valid() const;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    const char* key() const;

    // Advances to the next position.
    // REQUIRES: Valid()
    void Next();

    [[nodiscard]] Status NextAndValidate(bool allow_data_in_errors);

    // Advances to the previous position.
    // REQUIRES: Valid()
    void Prev();

    [[nodiscard]] Status PrevAndValidate(bool allow_data_in_errors);

    // Advance to the first entry with a key >= target.
    void Seek(const char* target);

    [[nodiscard]] Status SeekAndValidate(const char* target,
                                         bool allow_data_in_errors);

    // Retreat to the last entry with a key <= target.
    void SeekForPrev(const char* target);

    // Advance to a random entry in the list.
    void RandomSeek();

    // Position at the first entry in list.
    // Final state of iterator is Valid() iff list is not empty.
    void SeekToFirst();

    // Position at the last entry in list.
    // Final state of iterator is Valid() iff list is not empty.
    void SeekToLast();

   private:
    const InlineSkipList* list_;
    Node* node_;
    // Intentionally copyable
  };

 private:
  const uint16_t kMaxHeight_;
  const uint16_t kBranching_;
  const uint32_t kScaledInverseBranching_;

  Allocator* const allocator_;  // Allocator used for allocations of nodes
  // Immutable after construction
  Comparator const compare_;
  Node* const head_;

  // Modified only by Insert(). Read racily by readers, but stale
  // values are ok.
  std::atomic<int> max_height_;  // Height of the entire list

  // seq_splice_ is a Splice used for insertions in the non-concurrent
  // case. It caches the prev and next found during the most recent
  // non-concurrent insertion.
  Splice* seq_splice_;

  inline int GetMaxHeight() const {
    return max_height_.load(std::memory_order_relaxed);
  }

  int RandomHeight();

  Node* AllocateNode(size_t key_size, int height);

  bool Equal(const char* a, const char* b) const {
    return (compare_(a, b) == 0);
  }

  bool LessThan(const char* a, const char* b) const {
    return (compare_(a, b) < 0);
  }

  // Return true if key is greater than the data stored in "n". Null n
  // is considered infinite. n should not be head_.
  bool KeyIsAfterNode(const char* key, Node* n) const;
  bool KeyIsAfterNode(const DecodedKey& key, Node* n) const;

  // Returns the earliest node with a key >= key.
  // Returns nullptr if there is no such node.
  // @param out_of_order_node If not null, will validate the order of visited
  // nodes. If a pair of out-of-order nodes n1 and n2 are found, n1 will be
  // returned and *out_of_order_node will be set to n2.
  Node* FindGreaterOrEqual(const char* key, Node** out_of_order_node) const;

  // Returns the latest node with a key < key.
  // Returns head_ if there is no such node.
  // Fills prev[level] with pointer to previous node at "level" for every
  // level in [0..max_height_-1], if prev is non-null.
  // @param out_of_order_node If not null, will validate the order of visited
  // nodes. If a pair of out-of-order nodes n1 and n2 are found, n1 will be
  // returned and *out_of_order_node will be set to n2.
  Node* FindLessThan(const char* key, Node** out_of_order_node) const;

  // Return the last node in the list.
  // Return head_ if list is empty.
  Node* FindLast() const;

  // Returns a random entry.
  Node* FindRandomEntry() const;

  // Traverses a single level of the list, setting *out_prev to the last
  // node before the key and *out_next to the first node after. Assumes
  // that the key is not present in the skip list. On entry, before should
  // point to a node that is before the key, and after should point to
  // a node that is after the key. after should be nullptr if a good after
  // node isn't conveniently available.
  template <bool prefetch_before>
  void FindSpliceForLevel(const DecodedKey& key, Node* before, Node* after,
                          int level, Node** out_prev, Node** out_next);

  // Recomputes Splice levels from highest_level (inclusive) down to
  // lowest_level (inclusive).
  void RecomputeSpliceLevels(const DecodedKey& key, Splice* splice,
                             int recompute_level);

  static Status Corruption(Node* prev, Node* next, bool allow_data_in_errors);
};

// Implementation details follow

template <class Comparator>
struct InlineSkipList<Comparator>::Splice {
  // The invariant of a Splice is that prev_[i+1].key <= prev_[i].key <
  // next_[i].key <= next_[i+1].key for all i. That means that if a
  // key is bracketed by prev_[i] and next_[i] then it is bracketed by
  // all higher levels. It is _not_ required that prev_[i]->Next(i) ==
  // next_[i] (it probably did at some point in the past, but intervening
  // or concurrent operations might have inserted nodes in between).
  int height_ = 0;
  Node** prev_;
  Node** next_;
};

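// A concrete reading of the invariant above (illustrative, not taken from
// the implementation): for a list holding keys 10, 20, 30, 50, 60 and a
// splice last fixed up around key 40, one valid state is
//
//   level 2: prev_[2] = 10, next_[2] = 60
//   level 1: prev_[1] = 30, next_[1] = 60
//   level 0: prev_[0] = 30, next_[0] = 50
//
// Each level's bracket contains the bracket of the level below it, but
// prev_[i]->Next(i) may differ from next_[i] if other nodes (say 45) were
// inserted after the splice was computed.
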

// The Node data type is more of a pointer into custom-managed memory than
// a traditional C++ struct. The key is stored in the bytes immediately
// after the struct, and the next_ pointers for nodes with height > 1 are
// stored immediately _before_ the struct. This avoids the need to include
// any pointer or sizing data, which reduces per-node memory overheads.
template <class Comparator>
struct InlineSkipList<Comparator>::Node {
  // Stores the height of the node in the memory location normally used for
  // next_[0]. This is used for passing data from AllocateKey to Insert.
  void StashHeight(const int height) {
    assert(sizeof(int) <= sizeof(next_[0]));
    memcpy(static_cast<void*>(&next_[0]), &height, sizeof(int));
  }

  // Retrieves the value passed to StashHeight. Undefined after a call
  // to SetNext or NoBarrier_SetNext.
  int UnstashHeight() const {
    int rv;
    memcpy(&rv, &next_[0], sizeof(int));
    return rv;
  }

  const char* Key() const { return reinterpret_cast<const char*>(&next_[1]); }

  // Accessors/mutators for links. Wrapped in methods so we can add
  // the appropriate barriers as necessary, and perform the necessary
  // addressing trickery for storing links below the Node in memory.
  Node* Next(int n) {
    assert(n >= 0);
    // Use an 'acquire load' so that we observe a fully initialized
    // version of the returned Node.
    return ((&next_[0] - n)->load(std::memory_order_acquire));
  }

  void SetNext(int n, Node* x) {
    assert(n >= 0);
    // Use a 'release store' so that anybody who reads through this
    // pointer observes a fully initialized version of the inserted node.
    (&next_[0] - n)->store(x, std::memory_order_release);
  }

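  // Illustrative sketch (not part of the original source): the release
  // store above pairs with the acquire load in Next(). A writer fully
  // initializes a node (its key bytes and lower-level links, typically via
  // NoBarrier_SetNext) before publishing it with SetNext(); any reader
  // whose Next() observes the new pointer is then guaranteed to see a
  // fully constructed node. Roughly (node construction elided):
  //
  //   // writer thread                       // reader thread
  //   n->NoBarrier_SetNext(level, succ);     Node* x = prev->Next(level);
  //   prev->SetNext(level, n);  // publish   if (x != nullptr) {
  //                                            Use(x->Key());  // safe
  //                                          }
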
  bool CASNext(int n, Node* expected, Node* x) {
    assert(n >= 0);
    return (&next_[0] - n)->compare_exchange_strong(expected, x);
  }

  // No-barrier variants that can be safely used in a few locations.
  Node* NoBarrier_Next(int n) {
    assert(n >= 0);
    return (&next_[0] - n)->load(std::memory_order_relaxed);
  }

  void NoBarrier_SetNext(int n, Node* x) {
    assert(n >= 0);
    (&next_[0] - n)->store(x, std::memory_order_relaxed);
  }

  // Insert node after prev on specific level.
  void InsertAfter(Node* prev, int level) {
    // NoBarrier_SetNext() suffices since we will add a barrier when
    // we publish a pointer to "this" in prev.
    NoBarrier_SetNext(level, prev->NoBarrier_Next(level));
    prev->SetNext(level, this);
  }

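  // Illustrative sketch (not part of the original source): InsertAfter()
  // is only safe when no other thread can be modifying this portion of the
  // list. A concurrent insert would publish each level with CASNext()
  // instead, along these lines:
  //
  //   Node* succ = prev->Next(level);
  //   NoBarrier_SetNext(level, succ);            // tentatively link new node
  //   if (!prev->CASNext(level, succ, this)) {
  //     // Lost the race: another writer changed prev->Next(level). The
  //     // caller must re-find the correct (prev, succ) pair for this key
  //     // at this level and retry.
  //   }
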
 private:
  // next_[0] is the lowest level link (level 0). Higher levels are
  // stored _earlier_, so level 1 is at next_[-1].
  std::atomic<Node*> next_[1];
};

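// Illustrative sketch (not part of the original source): for a node of
// height h with a key of key_size bytes, the layout described in the Node
// comment above corresponds to a single contiguous, aligned allocation of
// roughly this shape (the allocation code is elided here; `prefix` is the
// space for the h-1 extra links):
//
//   prefix = sizeof(std::atomic<Node*>) * (h - 1);
//   raw    = AllocateAligned(prefix + sizeof(Node) + key_size);
//   node   = reinterpret_cast<Node*>(raw + prefix);
//
//   [ next_[-(h-1)] ... next_[-1] ][ Node: next_[0] ][ key bytes ... ]
//     ^ raw                         ^ node            ^ node->Key()
//
// next_[0] is embedded in the struct itself, the higher-level links sit at
// negative indices before it, and the key follows immediately after, which
// is why Key() returns reinterpret_cast<const char*>(&next_[1]).
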
template <class Comparator>
inline InlineSkipList<Comparator>::Iterator::Iterator(
    const InlineSkipList* list) {
  SetList(list);
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::SetList(
    const InlineSkipList* list) {
  list_ = list;
  node_ = nullptr;
}

template <class Comparator>
inline bool InlineSkipList<Comparator>::Iterator::Valid() const {
  return node_ != nullptr;
}

template <class Comparator>
inline const char* InlineSkipList<Comparator>::Iterator::key() const {
  assert(Valid());
  return node_->Key();
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::Next() {
  assert(Valid());
  node_ = node_->Next(0);
}

template <class Comparator>
inline Status InlineSkipList<Comparator>::Iterator::NextAndValidate(
    bool allow_data_in_errors) {
  assert(Valid());
  Node* prev_node = node_;
  node_ = node_->Next(0);
  // Verify that keys are increasing.
  if (prev_node != list_->head_ && node_ != nullptr &&
      list_->compare_(prev_node->Key(), node_->Key()) >= 0) {
    Node* node = node_;
    // Invalidate the iterator.
    node_ = nullptr;
    return Corruption(prev_node, node, allow_data_in_errors);
  }
  return Status::OK();
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::Prev() {
  // Instead of using explicit "prev" links, we just search for the
  // last node that falls before key.
  assert(Valid());
  node_ = list_->FindLessThan(node_->Key(), nullptr);
  if (node_ == list_->head_) {
    node_ = nullptr;
  }
}

template <class Comparator>
inline Status InlineSkipList<Comparator>::Iterator::PrevAndValidate(
    const bool allow_data_in_errors) {
  assert(Valid());
  // Skip list validation is done in FindLessThan().
  Node* out_of_order_node = nullptr;
  node_ = list_->FindLessThan(node_->Key(), &out_of_order_node);
  if (out_of_order_node) {
    Node* node = node_;
    node_ = nullptr;
    return Corruption(node, out_of_order_node, allow_data_in_errors);
  }
  if (node_ == list_->head_) {
    node_ = nullptr;
  }
  return Status::OK();
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::Seek(const char* target) {
  node_ = list_->FindGreaterOrEqual(target, nullptr);
}

template <class Comparator>
inline Status InlineSkipList<Comparator>::Iterator::SeekAndValidate(
    const char* target, const bool allow_data_in_errors) {
  Node* out_of_order_node = nullptr;
  node_ = list_->FindGreaterOrEqual(target, &out_of_order_node);
  if (out_of_order_node) {
    Node* node = node_;
    node_ = nullptr;
    return Corruption(node, out_of_order_node, allow_data_in_errors);
  }
  return Status::OK();
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::SeekForPrev(
    const char* target) {
  Seek(target);
  if (!Valid()) {
    SeekToLast();
  }
  while (Valid() && list_->LessThan(target, key())) {
    Prev();
  }
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::RandomSeek() {
  node_ = list_->FindRandomEntry();
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::SeekToFirst() {
  node_ = list_->head_->Next(0);
}

template <class Comparator>
inline void InlineSkipList<Comparator>::Iterator::SeekToLast() {
  node_ = list_->FindLast();
  if (node_ == list_->head_) {
    node_ = nullptr;
  }
}

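// Illustrative sketch (not part of the original source): typical read-side
// use of the iterator API above. `list` is assumed to be an InlineSkipList
// instantiated with a suitable Comparator type `Cmp`, and `target` an
// encoded key in the form that comparator expects:
//
//   InlineSkipList<Cmp>::Iterator iter(&list);
//   for (iter.Seek(target); iter.Valid(); iter.Next()) {
//     const char* entry = iter.key();  // points at the node's inline key
//     // ... decode and use `entry` ...
//   }
//
// The *AndValidate variants (SeekAndValidate, NextAndValidate,
// PrevAndValidate) perform the same traversal but additionally check that
// adjacent keys are ordered, returning a Corruption status and invalidating
// the iterator if an out-of-order pair is found.
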
template <class Comparator>
int InlineSkipList<Comparator>::RandomHeight() {
  auto rnd = Random::GetTLSInstance();

  // Increase height with probability 1 in kBranching
  int height = 1;
  while (height < kMaxHeight_ && height < kMaxPossibleHeight &&
         rnd->Next() < kScaledInverseBranching_) {
    height++;
  }
  TEST_SYNC_POINT_CALLBACK("InlineSkipList::RandomHeight::height", &height);
  assert(height > 0);
  assert(height <= kMaxHeight_);
  assert(height <= kMaxPossibleHeight);
  return height;
}

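// Illustrative note (not part of the original source): RandomHeight() draws
// from a (capped) geometric distribution. Each additional level is taken
// with probability p = 1/kBranching (kScaledInverseBranching_ is that
// probability rescaled into the range produced by rnd->Next()), so,
// ignoring the kMaxHeight_ cap, P(height >= h) = p^(h-1) and the expected
// height is 1/(1-p); with a branching factor of 4, for example, that is
// about 1.33 links per node on average. A standalone simulation of the same
// rule (the function name is hypothetical; Random/rnd as used above):
//
//   int SimulatedHeight(Random* rnd, int max_height, uint32_t branching) {
//     int height = 1;
//     while (height < max_height && rnd->Next() % branching == 0) {
//       height++;
//     }
//     return height;
//   }
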
template <class Comparator>
bool InlineSkipList<Comparator>::KeyIsAfterNode(const char* key,
                                                Node* n) const {
  // nullptr n is considered infinite
  assert(n != head_);
  return (n != nullptr) && (compare_(n->Key(), key) < 0);
}

template <class Comparator>
bool InlineSkipList<Comparator>::KeyIsAfterNode(const DecodedKey& key,
                                                Node* n) const {
  // nullptr n is considered infinite
  assert(n != head_);
  return (n != nullptr) && (compare_(n->Key(), key) < 0);
}

template <class Comparator>
typename InlineSkipList<Comparator>::Node*
InlineSkipList<Comparator>::FindGreaterOrEqual(
    const char* key, Node** const out_of_order_node) const {
  // Note: It looks like we could reduce duplication by implementing
  // this function as FindLessThan(key)->Next(0), but we wouldn't be able
  // to exit early on equality and the result wouldn't even be correct.
  // A concurrent insert might occur after FindLessThan(key) but before
  // we get a chance to call Next(0).
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  Node* last_bigger = nullptr;
  const DecodedKey key_decoded = compare_.decode_key(key);
  while (true) {
    Node* next = x->Next(level);
    if (next != nullptr) {
      PREFETCH(next->Next(level), 0, 1);
      if (out_of_order_node && x != head_ &&
          compare_(x->Key(), next->Key()) >= 0) {
        *out_of_order_node = next;
        return x;
      }
    }
    // Make sure the lists are sorted
    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x));
    // Make sure we haven't overshot during our search
    assert(x == head_ || KeyIsAfterNode(key_decoded, x));
    int cmp = (next == nullptr || next == last_bigger)
                  ? 1
                  : compare_(next->Key(), key_decoded);
    if (cmp == 0 || (cmp > 0 && level == 0)) {
      return next;
    } else if (cmp < 0) {
      // Keep searching in this list
      x = next;
    } else {
      // Switch to next list, reuse compare_() result
      last_bigger = next;
      level--;
    }
  }
}

template <class Comparator>
typename InlineSkipList<Comparator>::Node*
InlineSkipList<Comparator>::FindLessThan(const char* key,
                                         Node** const out_of_order_node) const {
  int level = GetMaxHeight() - 1;
  assert(level >= 0);
  Node* x = head_;
  // KeyIsAfter(key, last_not_after) is definitely false
  Node* last_not_after = nullptr;
  const DecodedKey key_decoded = compare_.decode_key(key);
  while (true) {
    assert(x != nullptr);
    Node* next = x->Next(level);
    if (next != nullptr) {
      PREFETCH(next->Next(level), 0, 1);
      if (out_of_order_node && x != head_ &&
          compare_(x->Key(), next->Key()) >= 0) {
        *out_of_order_node = next;
        return x;
      }
    }
    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x));
    assert(x == head_ || KeyIsAfterNode(key_decoded, x));
    if (next != last_not_after && KeyIsAfterNode(key_decoded, next)) {
      // Keep searching in this list
      assert(next != nullptr);
      x = next;
    } else {
      if (level == 0) {
        return x;
      } else {
        // Switch to next list, reuse KeyIsAfterNode() result
        last_not_after = next;
        level--;
      }
    }
  }
}

template <class Comparator>
typename InlineSkipList<Comparator>::Node*
InlineSkipList<Comparator>::FindLast() const {
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  while (true) {
    Node* next = x->Next(level);
    if (next == nullptr) {
      if (level == 0) {
        return x;
      } else {
        // Switch to next list
        level--;
      }
    } else {
      x = next;
    }
  }
}

template <class Comparator>
typename InlineSkipList<Comparator>::Node*
InlineSkipList<Comparator>::FindRandomEntry() const {
  // TODO(bjlemaire): consider adding PREFETCH calls.
  Node *x = head_, *scan_node = nullptr, *limit_node = nullptr;

  // We start at the max level.
  // For each level, we look at all the nodes at that level and randomly
  // pick one of them. Then we decrement the level and repeat the process.
  // eg: assume GetMaxHeight()=5, and there are 100 elements (nodes).
  // level 4 nodes: lvl_nodes={#1, #15, #67, #84}. Randomly pick #15.
  // We will consider all the nodes between #15 (inclusive) and #67
  // (exclusive). #67 is called 'limit_node' here.
  // level 3 nodes: lvl_nodes={#15, #21, #45, #51}. Randomly choose
  // #51. #67 remains 'limit_node'.
  // [...]
  // level 0 nodes: lvl_nodes={#56,#57,#58,#59}. Randomly pick #57.
  // Return Node #57.
  std::vector<Node*> lvl_nodes;
  Random* rnd = Random::GetTLSInstance();
  int level = GetMaxHeight() - 1;

  while (level >= 0) {
    lvl_nodes.clear();
    scan_node = x;
    while (scan_node != limit_node) {
      lvl_nodes.push_back(scan_node);
      scan_node = scan_node->Next(level);
    }
    uint32_t rnd_idx = rnd->Next() % lvl_nodes.size();
    x = lvl_nodes[rnd_idx];
    if (rnd_idx + 1 < lvl_nodes.size()) {
      limit_node = lvl_nodes[rnd_idx + 1];
    }
    level--;
  }
  // There is a special case where x could still be the head_
  // (note that the head_ contains no key).
  return x == head_ && head_ != nullptr ? head_->Next(0) : x;
}

Re-implement GetApproximateMemTableStats for skip lists (#13047)
Summary:
GetApproximateMemTableStats() could return some bad results with the standard skip list memtable. See this new db_bench test showing the dismal distribution of results when the actual number of entries in range is 1000:
```
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=1000
...
filluniquerandom : 1.391 micros/op 718915 ops/sec 1.391 seconds 1000000 operations; 11.7 MB/s
approximatememtablestats : 3.711 micros/op 269492 ops/sec 3.711 seconds 1000000 operations;
Reported entry count stats (expected 1000):
Count: 1000000 Average: 2344.1611 StdDev: 26587.27
Min: 0 Median: 965.8555 Max: 835273
Percentiles: P50: 965.86 P75: 1610.77 P99: 12618.01 P99.9: 74991.58 P99.99: 830970.97
------------------------------------------------------
[ 0, 1 ] 131344 13.134% 13.134% ###
( 1, 2 ] 115 0.011% 13.146%
( 2, 3 ] 106 0.011% 13.157%
( 3, 4 ] 190 0.019% 13.176%
( 4, 6 ] 214 0.021% 13.197%
( 6, 10 ] 522 0.052% 13.249%
( 10, 15 ] 748 0.075% 13.324%
( 15, 22 ] 1002 0.100% 13.424%
( 22, 34 ] 1948 0.195% 13.619%
( 34, 51 ] 3067 0.307% 13.926%
( 51, 76 ] 4213 0.421% 14.347%
( 76, 110 ] 5721 0.572% 14.919%
( 110, 170 ] 11375 1.137% 16.056%
( 170, 250 ] 17928 1.793% 17.849%
( 250, 380 ] 36597 3.660% 21.509% #
( 380, 580 ] 77882 7.788% 29.297% ##
( 580, 870 ] 160193 16.019% 45.317% ###
( 870, 1300 ] 210098 21.010% 66.326% ####
( 1300, 1900 ] 167461 16.746% 83.072% ###
( 1900, 2900 ] 78678 7.868% 90.940% ##
( 2900, 4400 ] 47743 4.774% 95.715% #
( 4400, 6600 ] 17650 1.765% 97.480%
( 6600, 9900 ] 11895 1.190% 98.669%
( 9900, 14000 ] 4993 0.499% 99.168%
( 14000, 22000 ] 2384 0.238% 99.407%
( 22000, 33000 ] 1966 0.197% 99.603%
( 50000, 75000 ] 2968 0.297% 99.900%
( 570000, 860000 ] 999 0.100% 100.000%
readrandom : 1.967 micros/op 508487 ops/sec 1.967 seconds 1000000 operations; 8.2 MB/s (1000000 of 1000000 found)
```
Perhaps the only good thing to say about the old implementation was that it was fast, though apparently not that fast.
I've implemented a much more robust and reasonably fast new version of the function. It's still logarithmic but with some larger constant factors. The standard deviation from true count is around 20% or less, and roughly the CPU cost of two memtable point look-ups. See code comments for detail.
```
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=1000
...
filluniquerandom : 1.478 micros/op 676434 ops/sec 1.478 seconds 1000000 operations; 11.0 MB/s
approximatememtablestats : 2.694 micros/op 371157 ops/sec 2.694 seconds 1000000 operations;
Reported entry count stats (expected 1000):
Count: 1000000 Average: 1073.5158 StdDev: 197.80
Min: 608 Median: 1079.9506 Max: 2176
Percentiles: P50: 1079.95 P75: 1223.69 P99: 1852.36 P99.9: 1898.70 P99.99: 2176.00
------------------------------------------------------
( 580, 870 ] 134848 13.485% 13.485% ###
( 870, 1300 ] 747868 74.787% 88.272% ###############
( 1300, 1900 ] 116536 11.654% 99.925% ##
( 1900, 2900 ] 748 0.075% 100.000%
readrandom : 1.997 micros/op 500654 ops/sec 1.997 seconds 1000000 operations; 8.1 MB/s (1000000 of 1000000 found)
```
We can already see that the distribution of results is dramatically better and wonderfully normal-looking, with relative standard deviation around 20%. The function is also FASTER, at least with these parameters. Let's look how this behavior generalizes, first *much* larger range:
```
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=30000
filluniquerandom : 1.390 micros/op 719654 ops/sec 1.376 seconds 990000 operations; 11.7 MB/s
approximatememtablestats : 1.129 micros/op 885649 ops/sec 1.129 seconds 1000000 operations;
Reported entry count stats (expected 30000):
Count: 1000000 Average: 31098.8795 StdDev: 3601.47
Min: 21504 Median: 29333.9303 Max: 43008
Percentiles: P50: 29333.93 P75: 33018.00 P99: 43008.00 P99.9: 43008.00 P99.99: 43008.00
------------------------------------------------------
( 14000, 22000 ] 408 0.041% 0.041%
( 22000, 33000 ] 749327 74.933% 74.974% ###############
( 33000, 50000 ] 250265 25.027% 100.000% #####
readrandom : 1.894 micros/op 528083 ops/sec 1.894 seconds 1000000 operations; 8.5 MB/s (989989 of 1000000 found)
```
This is *even faster* and relatively *more accurate*, with relative standard deviation closer to 10%. Code comments explain why. Now let's look at smaller ranges. Implementation quirks or conveniences:
* When actual number in range is >= 40, the minimum return value is 40.
* When the actual is <= 10, it is guaranteed to return that actual number.
```
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=75
...
filluniquerandom : 1.417 micros/op 705668 ops/sec 1.417 seconds 999975 operations; 11.4 MB/s
approximatememtablestats : 3.342 micros/op 299197 ops/sec 3.342 seconds 1000000 operations;
Reported entry count stats (expected 75):
Count: 1000000 Average: 75.1210 StdDev: 15.02
Min: 40 Median: 71.9395 Max: 256
Percentiles: P50: 71.94 P75: 89.69 P99: 119.12 P99.9: 166.68 P99.99: 229.78
------------------------------------------------------
( 34, 51 ] 38867 3.887% 3.887% #
( 51, 76 ] 550554 55.055% 58.942% ###########
( 76, 110 ] 398854 39.885% 98.828% ########
( 110, 170 ] 11353 1.135% 99.963%
( 170, 250 ] 364 0.036% 99.999%
( 250, 380 ] 8 0.001% 100.000%
readrandom : 1.861 micros/op 537224 ops/sec 1.861 seconds 1000000 operations; 8.7 MB/s (999974 of 1000000 found)
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=25
...
filluniquerandom : 1.501 micros/op 666283 ops/sec 1.501 seconds 1000000 operations; 10.8 MB/s
approximatememtablestats : 5.118 micros/op 195401 ops/sec 5.118 seconds 1000000 operations;
Reported entry count stats (expected 25):
Count: 1000000 Average: 26.2392 StdDev: 4.58
Min: 25 Median: 28.4590 Max: 72
Percentiles: P50: 28.46 P75: 31.69 P99: 49.27 P99.9: 67.95 P99.99: 72.00
------------------------------------------------------
( 22, 34 ] 928936 92.894% 92.894% ###################
( 34, 51 ] 67960 6.796% 99.690% #
( 51, 76 ] 3104 0.310% 100.000%
readrandom : 1.892 micros/op 528595 ops/sec 1.892 seconds 1000000 operations; 8.6 MB/s (1000000 of 1000000 found)
$ ./db_bench --benchmarks=filluniquerandom,approximatememtablestats,readrandom --value_size=1 --num=1000000 --batch_size=10
...
filluniquerandom : 1.642 micros/op 608916 ops/sec 1.642 seconds 1000000 operations; 9.9 MB/s
approximatememtablestats : 3.042 micros/op 328721 ops/sec 3.042 seconds 1000000 operations;
Reported entry count stats (expected 10):
Count: 1000000 Average: 10.0000 StdDev: 0.00
Min: 10 Median: 10.0000 Max: 10
Percentiles: P50: 10.00 P75: 10.00 P99: 10.00 P99.9: 10.00 P99.99: 10.00
------------------------------------------------------
( 6, 10 ] 1000000 100.000% 100.000% ####################
readrandom : 1.805 micros/op 554126 ops/sec 1.805 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
```
Remarkably consistent.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/13047
Test Plan: new db_bench test for both performance and accuracy (see above); added to crash test; unit test updated.
Reviewed By: cbi42
Differential Revision: D63722003
Pulled By: pdillinger
fbshipit-source-id: cfc8613c085e87c17ecec22d82601aac2a5a1b26
2024-10-02 21:25:50 +00:00

template <class Comparator>
uint64_t InlineSkipList<Comparator>::ApproximateNumEntries(
    const Slice& start_ikey, const Slice& end_ikey) const {
  // The number of entries at a given level for the given range, in terms of
  // the actual number of entries in that range (level 0), follows a binomial
  // distribution, which is very well approximated by the Poisson distribution.
  // That has stddev sqrt(x) where x is the expected number of entries (mean)
  // at this level, and the best predictor of x is the number of observed
  // entries (at this level). To predict the number of entries on level 0 we use
  // x * kBranching ^ level. From the standard deviation, the P99+ relative
  // error is roughly 3 * sqrt(x) / x. Thus, a reasonable approach would be to
  // find the smallest level with at least some moderate constant number of
  // entries in range. E.g. with at least ~40 entries, we expect P99+ relative
  // error (approximation accuracy) of ~ 50% = 3 * sqrt(40) / 40; P95 error of
  // ~30%; P75 error of < 20%.
  //
  // However, there are two issues with this approach, and an observation:
  // * Pointer chasing on the larger (bottom) levels is much slower because of
  // cache hierarchy effects, so when the result is smaller, getting the result
  // will be substantially slower, despite traversing a similar number of
  // entries. (We could be clever about pipelining our pointer chasing but
  // that's complicated.)
  // * The larger (bottom) levels also have lower variance because there's a
  // chance (or certainty) that we reach level 0 and return the exact answer.
  // * For applications in query planning, we can also tolerate more variance on
  // small results because the impact of misestimating is likely smaller.
  //
  // These factors point us to an approach in which we have a higher minimum
  // threshold number of samples for higher levels and lower for lower levels
  // (see sufficient_samples below). This seems to yield roughly consistent
  // relative error (stddev around 20%, less for large results) and roughly
  // consistent query time around the time of two memtable point queries.
  //
  // Engineering observation: it is tempting to think that taking into account
  // what we already found in how many entries occur on higher levels, not just
  // the first iterated level with a sufficient number of samples, would yield
  // a more accurate estimate. But that doesn't work because of the particular
  // correlations and independences of the data: each level higher is just an
  // independently probabilistic filtering of the level below it. That
  // filtering from level l to l+1 has no more information about levels
  // 0 .. l-1 than we can get from level l. The structure of RandomHeight() is
  // a clue to these correlations and independences.

  Node* lb = head_;
  Node* ub = nullptr;
2015-11-24 21:01:09 +00:00
|
|
|
uint64_t count = 0;
|
  for (int level = GetMaxHeight() - 1; level >= 0; level--) {
    auto sufficient_samples = static_cast<uint64_t>(level) * kBranching_ + 10U;
    if (count >= sufficient_samples) {
      // No more counting; apply powers of kBranching and avoid floating point
      count *= kBranching_;
      continue;
    }
    count = 0;
    Node* next;
    // Get a more precise lower bound (for start key)
    for (;;) {
      next = lb->Next(level);
      if (next == ub) {
        break;
      }
      assert(next != nullptr);
      if (compare_(next->Key(), start_ikey) >= 0) {
        break;
      }
      lb = next;
    }
    // Count entries on this level until upper bound (for end key)
    for (;;) {
      if (next == ub) {
        break;
      }
      assert(next != nullptr);
      if (compare_(next->Key(), end_ikey) >= 0) {
        // Save refined upper bound to potentially save key comparison
        ub = next;
        break;
      }
      count++;
      next = next->Next(level);
    }
  }
  return count;
}
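The comment above describes the estimator in terms of per-level scaling and Poisson-style error. The standalone sketch below is purely illustrative (it assumes a branching factor of 4; none of its names come from this file) and just works the arithmetic: stopping at level 3 with 25 observed entries gives 25 * 4^3 = 1600 with a relative stddev of about 1/sqrt(25) = 20%. The same arithmetic explains the quirks noted in the commit message earlier: level 0 only needs 10 samples before scaling, so any scaled result is at least about 10 * branching, while a range whose count reaching level 0 is still under 10 is simply re-counted exactly at level 0.
```
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kAssumedBranching = 4;  // illustrative; not read from RocksDB
  const int stop_level = 3;              // level where counting stopped
  const uint64_t observed = 25;          // entries seen in range at that level

  // Same scaling as the `count *= kBranching_` step, applied once per level.
  uint64_t estimate = observed;
  for (int i = 0; i < stop_level; ++i) {
    estimate *= kAssumedBranching;
  }

  // Poisson-style error: relative stddev of the estimate is ~ 1/sqrt(observed).
  const double rel_stddev = 1.0 / std::sqrt(static_cast<double>(observed));
  std::printf("estimate = %llu, relative stddev ~ %.0f%%\n",
              static_cast<unsigned long long>(estimate), rel_stddev * 100.0);
  return 0;
}
```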

template <class Comparator>
InlineSkipList<Comparator>::InlineSkipList(const Comparator cmp,
                                           Allocator* allocator,
                                           int32_t max_height,
                                           int32_t branching_factor)
    : kMaxHeight_(static_cast<uint16_t>(max_height)),
      kBranching_(static_cast<uint16_t>(branching_factor)),
      kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
      allocator_(allocator),
Reorder field based on esan data
Summary:
Ran: TEST_TMPDIR=/dev/shm ./buck-out/gen/rocks/tools/rocks_db_bench --benchmarks=readwhilewriting --num=5000000 -benchmark_write_rate_limit=2000000 --threads=32
Collected esan data and reordered the fields accordingly. Accesses to the 4th and 6th fields make up the majority of accesses, so they are grouped together. Overall, this struct accounts for 10%+ of the total field accesses in the program (637773011/6107964986).
==2433831== class rocksdb::InlineSkipList
==2433831== size = 48, count = 637773011, ratio = 112412, array access = 0
==2433831== # 0: offset = 0, size = 2, count = 455137, type = i16
==2433831== # 1: offset = 2, size = 2, count = 6, type = i16
==2433831== # 2: offset = 4, size = 4, count = 182303, type = i32
==2433831== # 3: offset = 8, size = 8, count = 263953900, type = %"class.rocksdb::MemTableRep::KeyComparator"*
==2433831== # 4: offset = 16, size = 8, count = 136409, type = %"class.rocksdb::Allocator"*
==2433831== # 5: offset = 24, size = 8, count = 366628820, type = %"struct.rocksdb::InlineSkipList<const rocksdb::MemTableRep::KeyComparator &>::Node"*
==2433831== # 6: offset = 32, size = 4, count = 6280031, type = %"struct.std::atomic" = type { %"struct.std::__atomic_base" }
==2433831== # 7: offset = 40, size = 8, count = 136405, type = %"struct.rocksdb::InlineSkipList<const rocksdb::MemTableRep::KeyComparator &>::Splice"*
==2433831==EfficiencySanitizer: total struct field access count = 6107964986
Before re-ordering
[trentxintong@devbig460.frc2 ~/fbsource/fbcode]$ fgrep readwhilewriting
without-ro.log
readwhilewriting : 0.036 micros/op 27545605 ops/sec; 26.8 MB/s
(45954 of 5000000 found)
readwhilewriting : 0.036 micros/op 28024240 ops/sec; 27.2 MB/s
(43158 of 5000000 found)
readwhilewriting : 0.037 micros/op 27345145 ops/sec; 27.1 MB/s
(46725 of 5000000 found)
readwhilewriting : 0.037 micros/op 27072588 ops/sec; 27.3 MB/s
(42605 of 5000000 found)
readwhilewriting : 0.034 micros/op 29578781 ops/sec; 28.3 MB/s
(44294 of 5000000 found)
readwhilewriting : 0.035 micros/op 28528304 ops/sec; 27.7 MB/s
(44176 of 5000000 found)
readwhilewriting : 0.037 micros/op 27075497 ops/sec; 26.5 MB/s
(43763 of 5000000 found)
readwhilewriting : 0.036 micros/op 28024117 ops/sec; 27.1 MB/s
(40622 of 5000000 found)
readwhilewriting : 0.037 micros/op 27078709 ops/sec; 27.6 MB/s
(47774 of 5000000 found)
readwhilewriting : 0.034 micros/op 29020689 ops/sec; 28.1 MB/s
(45066 of 5000000 found)
AVERAGE()=27.37 MB/s
After re-ordering
[trentxintong@devbig460.frc2 ~/fbsource/fbcode]$ fgrep readwhilewriting
ro.log
readwhilewriting : 0.036 micros/op 27542409 ops/sec; 27.7 MB/s
(46163 of 5000000 found)
readwhilewriting : 0.036 micros/op 28021148 ops/sec; 28.2 MB/s
(46155 of 5000000 found)
readwhilewriting : 0.036 micros/op 28021035 ops/sec; 27.3 MB/s
(44039 of 5000000 found)
readwhilewriting : 0.036 micros/op 27538659 ops/sec; 27.5 MB/s
(46781 of 5000000 found)
readwhilewriting : 0.036 micros/op 28028604 ops/sec; 27.6 MB/s
(44689 of 5000000 found)
readwhilewriting : 0.036 micros/op 27541452 ops/sec; 27.3 MB/s
(43156 of 5000000 found)
readwhilewriting : 0.034 micros/op 29041338 ops/sec; 28.8 MB/s
(44895 of 5000000 found)
readwhilewriting : 0.036 micros/op 27784974 ops/sec; 26.3 MB/s
(39963 of 5000000 found)
readwhilewriting : 0.036 micros/op 27538892 ops/sec; 28.1 MB/s
(46570 of 5000000 found)
readwhilewriting : 0.038 micros/op 26622473 ops/sec; 27.0 MB/s
(43236 of 5000000 found)
AVERAGE()=27.58 MB/s
Closes https://github.com/facebook/rocksdb/pull/3855
Reviewed By: siying
Differential Revision: D8048781
Pulled By: trentxintong
fbshipit-source-id: bc9807a9845e2a92cb171ce1ecb5a2c8a51f1481
2018-05-18 00:49:06 +00:00
      compare_(cmp),
      head_(AllocateNode(0, max_height)),
      max_height_(1),
      seq_splice_(AllocateSplice()) {
  assert(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height));
  assert(branching_factor > 1 &&
         kBranching_ == static_cast<uint32_t>(branching_factor));
  assert(kScaledInverseBranching_ > 0);

  for (int i = 0; i < kMaxHeight_; ++i) {
    head_->SetNext(i, nullptr);
  }
}

template <class Comparator>
char* InlineSkipList<Comparator>::AllocateKey(size_t key_size) {
  return const_cast<char*>(AllocateNode(key_size, RandomHeight())->Key());
}

template <class Comparator>
typename InlineSkipList<Comparator>::Node*
InlineSkipList<Comparator>::AllocateNode(size_t key_size, int height) {
  auto prefix = sizeof(std::atomic<Node*>) * (height - 1);

  // prefix is space for the height - 1 pointers that we store before
  // the Node instance (next_[-(height - 1) .. -1]). Node starts at
  // raw + prefix, and holds the bottom-most (level 0) skip list pointer
  // next_[0]. key_size is the bytes for the key, which comes just after
  // the Node.
  char* raw = allocator_->AllocateAligned(prefix + sizeof(Node) + key_size);
  Node* x = reinterpret_cast<Node*>(raw + prefix);

  // Once we've linked the node into the skip list we don't actually need
  // to know its height, because we can implicitly use the fact that we
  // traversed into a node at level h to know that h is a valid level
  // for that node. We need to convey the height to the Insert step,
  // however, so that it can perform the proper links. Since we're not
  // using the pointers at the moment, StashHeight temporarily borrows
  // storage from next_[0] for that purpose.
  x->StashHeight(height);
  return x;
}
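To make the layout comment above concrete, here is a small standalone sketch of the allocation arithmetic. It is an illustration under stated assumptions, not RocksDB code: FakeNode is a stand-in with the single-atomic-pointer footprint the comment implies for Node, and the pointer size is whatever the platform provides.
```
#include <atomic>
#include <cstddef>
#include <cstdio>

// Stand-in with the one-pointer footprint assumed for Node (holds next_[0]).
struct FakeNode {
  std::atomic<FakeNode*> next0;
};

int main() {
  const int height = 3;
  const std::size_t key_size = 24;

  // Space for next_[-(height - 1)] .. next_[-1], stored before the Node.
  const std::size_t prefix = sizeof(std::atomic<FakeNode*>) * (height - 1);
  // The Node itself follows, and the key bytes come immediately after it.
  const std::size_t total = prefix + sizeof(FakeNode) + key_size;

  std::printf("prefix=%zu node=%zu key=%zu total=%zu (Node* at raw + %zu)\n",
              prefix, sizeof(FakeNode), key_size, total, prefix);
  return 0;
}
```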

template <class Comparator>
typename InlineSkipList<Comparator>::Splice*
InlineSkipList<Comparator>::AllocateSplice() {
  // size of prev_ and next_
  size_t array_size = sizeof(Node*) * (kMaxHeight_ + 1);
  char* raw = allocator_->AllocateAligned(sizeof(Splice) + array_size * 2);
  Splice* splice = reinterpret_cast<Splice*>(raw);
  splice->height_ = 0;
  splice->prev_ = reinterpret_cast<Node**>(raw + sizeof(Splice));
  splice->next_ = reinterpret_cast<Node**>(raw + sizeof(Splice) + array_size);
  return splice;
}
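// Note (reader's sketch, not original source commentary): AllocateSplice makes
// one aligned allocation laid out as
//   [ Splice ][ prev_[0 .. kMaxHeight_] ][ next_[0 .. kMaxHeight_] ]
// with splice->prev_ pointing just past the Splice header and splice->next_
// pointing array_size bytes beyond that. AllocateSpliceOnHeap below builds the
// identical layout, but from new[] instead of the arena allocator.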

template <class Comparator>
typename InlineSkipList<Comparator>::Splice*
InlineSkipList<Comparator>::AllocateSpliceOnHeap() {
  size_t array_size = sizeof(Node*) * (kMaxHeight_ + 1);
  char* raw = new char[sizeof(Splice) + array_size * 2];
  Splice* splice = reinterpret_cast<Splice*>(raw);
  splice->height_ = 0;
  splice->prev_ = reinterpret_cast<Node**>(raw + sizeof(Splice));
  splice->next_ = reinterpret_cast<Node**>(raw + sizeof(Splice) + array_size);
  return splice;
}

template <class Comparator>
bool InlineSkipList<Comparator>::Insert(const char* key) {
  return Insert<false>(key, seq_splice_, false);
}

template <class Comparator>
bool InlineSkipList<Comparator>::InsertConcurrently(const char* key) {
  Node* prev[kMaxPossibleHeight];
  Node* next[kMaxPossibleHeight];
  Splice splice;
  splice.prev_ = prev;
  splice.next_ = next;
  return Insert<true>(key, &splice, false);
}
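// Note (reader's sketch, not original source commentary): Insert() reuses the
// shared member splice seq_splice_, which suggests it expects one writer at a
// time, while InsertConcurrently() builds a throwaway Splice on the stack
// (arrays sized kMaxPossibleHeight) so concurrent writers never share splice
// state.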

template <class Comparator>
bool InlineSkipList<Comparator>::InsertWithHint(const char* key, void** hint) {
  assert(hint != nullptr);
  Splice* splice = reinterpret_cast<Splice*>(*hint);
  if (splice == nullptr) {
    splice = AllocateSplice();
Prefer static_cast in place of most reinterpret_cast (#12308)
Summary:
The following are risks associated with pointer-to-pointer reinterpret_cast:
* Can produce the "wrong result" (crash or memory corruption). IIRC, in theory this can happen for any up-cast or down-cast for a non-standard-layout type, though in practice would only happen for multiple inheritance cases (where the base class pointer might be "inside" the derived object). We don't use multiple inheritance a lot, but we do.
* Can mask useful compiler errors upon code change, including converting between unrelated pointer types that you are expecting to be related, and converting between pointer and scalar types unintentionally.
I can only think of some obscure cases where static_cast could be troublesome when it compiles as a replacement:
* Going through `void*` could plausibly cause unnecessary or broken pointer arithmetic. Suppose we have
`struct Derived: public Base1, public Base2`. If we have `Derived*` -> `void*` -> `Base2*` -> `Derived*` through reinterpret casts, this could plausibly work (though technical UB) assuming the `Base2*` is not dereferenced. Changing to static cast could introduce breaking pointer arithmetic.
* Unnecessary (but safe) pointer arithmetic could arise in a case like `Derived*` -> `Base2*` -> `Derived*` where before the Base2 pointer might not have been dereferenced. This could potentially affect performance.
With some light scripting, I tried replacing pointer-to-pointer reinterpret_casts with static_cast and kept the cases that still compile. Most occurrences of reinterpret_cast have successfully been changed (except for java/ and third-party/). 294 changed, 257 remain.
A couple of related interventions included here:
* Previously Cache::Handle was not actually derived from in the implementations and just used as a `void*` stand-in with reinterpret_cast. Now there is a relationship to allow static_cast. In theory, this could introduce pointer arithmetic (as described above) but is unlikely without multiple inheritance AND non-empty Cache::Handle.
* Remove some unnecessary casts to void* as this is allowed to be implicit (for better or worse).
Most of the remaining reinterpret_casts are for converting to/from raw bytes of objects. We could consider better idioms for these patterns in follow-up work.
I wish there were a way to implement a template variant of static_cast that would only compile if no pointer arithmetic is generated, but best I can tell, this is not possible. AFAIK the best you could do is a dynamic check that the void* conversion after the static cast is unchanged.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12308
Test Plan: existing tests, CI
Reviewed By: ltamasi
Differential Revision: D53204947
Pulled By: pdillinger
fbshipit-source-id: 9de23e618263b0d5b9820f4e15966876888a16e2
2024-02-07 18:44:11 +00:00
    *hint = splice;
  }
  return Insert<false>(key, splice, true);
}

template <class Comparator>
bool InlineSkipList<Comparator>::InsertWithHintConcurrently(const char* key,
                                                            void** hint) {
  assert(hint != nullptr);
  Splice* splice = reinterpret_cast<Splice*>(*hint);
  if (splice == nullptr) {
    splice = AllocateSpliceOnHeap();
    *hint = splice;
  }
  return Insert<true>(key, splice, true);
}
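For orientation, a hypothetical caller-side fragment of the hint mechanism follows. It is a sketch, not runnable as-is: `list` and `MakeKey` are placeholders standing in for a fully constructed InlineSkipList and for whatever key encoding the caller uses. The point is only the ownership pattern visible in the code above: the caller holds an opaque void* initialized to nullptr, and the skip list lazily installs a Splice there and reuses it on later inserts.
```
void* hint = nullptr;  // the list caches a Splice* here on first use
for (int i = 0; i < 100; ++i) {
  const char* key = MakeKey(i);     // placeholder key factory
  list.InsertWithHint(key, &hint);  // reuses the cached splice for nearby keys
}
// The splice cached by InsertWithHint comes from the list's allocator (see
// AllocateSplice), while InsertWithHintConcurrently caches one obtained from
// AllocateSpliceOnHeap (plain new[]) instead.
```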
|
|
|
|
|
support for concurrent adds to memtable
Summary:
This diff adds support for concurrent adds to the skiplist memtable
implementations. Memory allocation is made thread-safe by the addition of
a spinlock, with small per-core buffers to avoid contention. Concurrent
memtable writes are made via an additional method and don't impose a
performance overhead on the non-concurrent case, so parallelism can be
selected on a per-batch basis.
Write thread synchronization is an increasing bottleneck for higher levels
of concurrency, so this diff adds --enable_write_thread_adaptive_yield
(default off). This feature causes threads joining a write batch
group to spin for a short time (default 100 usec) using sched_yield,
rather than going to sleep on a mutex. If the timing of the yield calls
indicates that another thread has actually run during the yield then
spinning is avoided. This option improves performance for concurrent
situations even without parallel adds, although it has the potential to
increase CPU usage (and the heuristic adaptation is not yet mature).
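A simplified sketch of the yield heuristic described above (not the actual write-thread code; the function name and the 3 usec threshold are made up for illustration):
```cpp
#include <sched.h>

#include <atomic>
#include <chrono>

// Spin with sched_yield for up to max_spin waiting for `ready`, but stop
// spinning early if a yield appears to have actually descheduled us (it took
// noticeably longer than a no-op yield), which suggests the CPU is busy and
// blocking on a mutex would be cheaper.
bool AwaitWithAdaptiveYield(const std::atomic<bool>& ready,
                            std::chrono::microseconds max_spin =
                                std::chrono::microseconds(100)) {
  const auto deadline = std::chrono::steady_clock::now() + max_spin;
  while (std::chrono::steady_clock::now() < deadline) {
    const auto before = std::chrono::steady_clock::now();
    sched_yield();
    const auto after = std::chrono::steady_clock::now();
    if (ready.load(std::memory_order_acquire)) {
      return true;  // reached the desired state while spinning
    }
    if (after - before > std::chrono::microseconds(3)) {
      break;  // another thread likely ran; fall back to sleeping on a mutex
    }
  }
  return ready.load(std::memory_order_acquire);
}
```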
Parallel writes are not currently compatible with
inplace updates, update callbacks, or delete filtering.
Enable it with --allow_concurrent_memtable_write (and
--enable_write_thread_adaptive_yield). Parallel memtable writes
are performance neutral when there is no actual parallelism, and in
my experiments (SSD server-class Linux and varying contention and key
sizes for fillrandom) they are always a performance win when there is
more than one thread.
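For reference, a minimal sketch of enabling this from application code, assuming the present-day `DBOptions` field names that correspond to these db_bench flags:
```cpp
#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Parallel memtable inserts (requires a skiplist-based memtable, the default).
  options.allow_concurrent_memtable_write = true;
  // Let threads joining a write group spin briefly before sleeping on the mutex.
  options.enable_write_thread_adaptive_yield = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/concurrent_memtable_demo", &db);
  assert(s.ok());
  delete db;
  return 0;
}
```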
Statistics are updated earlier in the write path, dropping the number
of DB mutex acquisitions from 2 to 1 for almost all cases.
This diff was motivated and inspired by Yahoo's cLSM work. It is more
conservative than cLSM: RocksDB's write batch group leader role is
preserved (along with all of the existing flush and write throttling
logic) and concurrent writers are blocked until all memtable insertions
have completed and the sequence number has been advanced, to preserve
linearizability.
My test config is "db_bench -benchmarks=fillrandom -threads=$T
-batch_size=1 -memtablerep=skip_list -value_size=100 --num=1000000/$T
-level0_slowdown_writes_trigger=9999 -level0_stop_writes_trigger=9999
-disable_auto_compactions --max_write_buffer_number=8
-max_background_flushes=8 --disable_wal --write_buffer_size=160000000
--block_size=16384 --allow_concurrent_memtable_write" on a two-socket
Xeon E5-2660 @ 2.2Ghz with lots of memory and an SSD hard drive. With 1
thread I get ~440Kops/sec. Peak performance for 1 socket (numactl
-N1) is slightly more than 1Mops/sec, at 16 threads. Peak performance
across both sockets happens at 30 threads, and is ~900Kops/sec, although
with fewer threads there is less performance loss when the system has
background work.
Test Plan:
1. concurrent stress tests for InlineSkipList and DynamicBloom
2. make clean; make check
3. make clean; DISABLE_JEMALLOC=1 make valgrind_check; valgrind db_bench
4. make clean; COMPILE_WITH_TSAN=1 make all check; db_bench
5. make clean; COMPILE_WITH_ASAN=1 make all check; db_bench
6. make clean; OPT=-DROCKSDB_LITE make check
7. verify no perf regressions when disabled
Reviewers: igor, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, IslamAbdelRahman, anthony, yhchiang, rven, sdong, guyg8, kradhakrishnan, dhruba
Differential Revision: https://reviews.facebook.net/D50589
2015-08-14 23:59:07 +00:00
|
|
|
template <class Comparator>
|
2017-10-05 01:03:29 +00:00
|
|
|
template <bool prefetch_before>
|
2018-03-23 19:12:15 +00:00
|
|
|
void InlineSkipList<Comparator>::FindSpliceForLevel(const DecodedKey& key,
|
2016-11-22 22:06:54 +00:00
|
|
|
Node* before, Node* after,
|
|
|
|
int level, Node** out_prev,
|
|
|
|
Node** out_next) {
|
2015-08-14 23:59:07 +00:00
|
|
|
while (true) {
|
|
|
|
Node* next = before->Next(level);
|
2017-10-05 01:03:29 +00:00
|
|
|
if (next != nullptr) {
|
|
|
|
PREFETCH(next->Next(level), 0, 1);
|
|
|
|
}
|
|
|
|
if (prefetch_before == true) {
|
2022-10-28 20:16:50 +00:00
|
|
|
if (next != nullptr && level > 0) {
|
|
|
|
PREFETCH(next->Next(level - 1), 0, 1);
|
2017-10-05 01:03:29 +00:00
|
|
|
}
|
|
|
|
}
|
2015-08-14 23:59:07 +00:00
|
|
|
assert(before == head_ || next == nullptr ||
|
|
|
|
KeyIsAfterNode(next->Key(), before));
|
|
|
|
assert(before == head_ || KeyIsAfterNode(key, before));
|
|
|
|
if (next == after || !KeyIsAfterNode(key, next)) {
|
|
|
|
// found it
|
|
|
|
*out_prev = before;
|
|
|
|
*out_next = next;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
before = next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class Comparator>
|
2018-03-23 19:12:15 +00:00
|
|
|
void InlineSkipList<Comparator>::RecomputeSpliceLevels(const DecodedKey& key,
|
2016-11-22 22:06:54 +00:00
|
|
|
Splice* splice,
|
|
|
|
int recompute_level) {
|
|
|
|
assert(recompute_level > 0);
|
|
|
|
assert(recompute_level <= splice->height_);
|
|
|
|
for (int i = recompute_level - 1; i >= 0; --i) {
|
2017-10-05 01:03:29 +00:00
|
|
|
FindSpliceForLevel<true>(key, splice->prev_[i + 1], splice->next_[i + 1], i,
|
2022-10-28 20:16:50 +00:00
|
|
|
&splice->prev_[i], &splice->next_[i]);
|
2016-11-22 22:06:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class Comparator>
|
|
|
|
template <bool UseCAS>
|
2018-02-01 02:45:49 +00:00
|
|
|
bool InlineSkipList<Comparator>::Insert(const char* key, Splice* splice,
|
2016-11-22 22:06:54 +00:00
|
|
|
bool allow_partial_splice_fix) {
|
2015-08-14 23:59:07 +00:00
|
|
|
Node* x = reinterpret_cast<Node*>(const_cast<char*>(key)) - 1;
|
2018-03-23 19:12:15 +00:00
|
|
|
const DecodedKey key_decoded = compare_.decode_key(key);
|
2015-08-14 23:59:07 +00:00
|
|
|
int height = x->UnstashHeight();
|
|
|
|
assert(height >= 1 && height <= kMaxHeight_);
|
2016-02-03 17:21:44 +00:00
|
|
|
|
2015-08-14 23:59:07 +00:00
|
|
|
int max_height = max_height_.load(std::memory_order_relaxed);
|
|
|
|
while (height > max_height) {
|
2016-11-22 22:06:54 +00:00
|
|
|
if (max_height_.compare_exchange_weak(max_height, height)) {
|
2015-08-14 23:59:07 +00:00
|
|
|
// successfully updated it
|
|
|
|
max_height = height;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// else retry, possibly exiting the loop because somebody else
|
|
|
|
// increased it
|
|
|
|
}
|
|
|
|
assert(max_height <= kMaxPossibleHeight);
|
|
|
|
|
2016-11-22 22:06:54 +00:00
|
|
|
int recompute_height = 0;
|
|
|
|
if (splice->height_ < max_height) {
|
|
|
|
// Either splice has never been used or max_height has grown since
|
|
|
|
// last use. We could potentially fix it in the latter case, but
|
|
|
|
// that is tricky.
|
|
|
|
splice->prev_[max_height] = head_;
|
|
|
|
splice->next_[max_height] = nullptr;
|
|
|
|
splice->height_ = max_height;
|
|
|
|
recompute_height = max_height;
|
|
|
|
} else {
|
|
|
|
// Splice is a valid proper-height splice that brackets some
|
|
|
|
// key, but does it bracket this one? We need to validate it and
|
|
|
|
// recompute a portion of the splice (levels 0..recompute_height-1)
|
|
|
|
// that is a superset of all levels that don't bracket the new key.
|
|
|
|
// Several choices are reasonable, because we have to balance the work
|
|
|
|
// saved against the extra comparisons required to validate the Splice.
|
|
|
|
//
|
|
|
|
// One strategy is just to recompute all of orig_splice_height if the
|
|
|
|
// bottom level isn't bracketing. This pessimistically assumes that
|
|
|
|
// we will either get a perfect Splice hit (increasing sequential
|
|
|
|
// inserts) or have no locality.
|
|
|
|
//
|
|
|
|
// Another strategy is to walk up the Splice's levels until we find
|
|
|
|
// a level that brackets the key. This strategy lets the Splice
|
|
|
|
// hint help for other cases: it turns insertion from O(log N) into
|
|
|
|
// O(log D), where D is the number of nodes in between the key that
|
|
|
|
// produced the Splice and the current insert (insertion is aided
|
|
|
|
// whether the new key is before or after the splice). If you have
|
|
|
|
// a way of using a prefix of the key to map directly to the closest
|
|
|
|
// Splice out of O(sqrt(N)) Splices and we make it so that splices
|
|
|
|
// can also be used as hints during read, then we end up with Oshman's
|
|
|
|
// and Shavit's SkipTrie, which has O(log log N) lookup and insertion
|
|
|
|
// (compare to O(log N) for skip list).
|
|
|
|
//
|
|
|
|
// We control the pessimistic strategy with allow_partial_splice_fix.
|
|
|
|
// A good strategy is probably to be pessimistic for seq_splice_,
|
|
|
|
// optimistic if the caller actually went to the work of providing
|
|
|
|
// a Splice.
|
|
|
|
while (recompute_height < max_height) {
|
|
|
|
if (splice->prev_[recompute_height]->Next(recompute_height) !=
|
|
|
|
splice->next_[recompute_height]) {
|
|
|
|
// splice isn't tight at this level; there must have been some inserts
|
|
|
|
// to this location that didn't update the splice. We might only be a
|
|
|
|
// little stale, but if the splice is very stale it would be O(N) to fix
|
|
|
|
// it. We haven't used up any of our budget of comparisons, so always
|
|
|
|
// move up even if we are pessimistic about our chances of success.
|
|
|
|
++recompute_height;
|
|
|
|
} else if (splice->prev_[recompute_height] != head_ &&
|
2018-03-23 19:12:15 +00:00
|
|
|
!KeyIsAfterNode(key_decoded,
|
|
|
|
splice->prev_[recompute_height])) {
|
2016-11-22 22:06:54 +00:00
|
|
|
// key is from before splice
|
|
|
|
if (allow_partial_splice_fix) {
|
|
|
|
// skip all levels with the same node without more comparisons
|
|
|
|
Node* bad = splice->prev_[recompute_height];
|
|
|
|
while (splice->prev_[recompute_height] == bad) {
|
|
|
|
++recompute_height;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// we're pessimistic, recompute everything
|
|
|
|
recompute_height = max_height;
|
|
|
|
}
|
2022-10-28 20:16:50 +00:00
|
|
|
} else if (KeyIsAfterNode(key_decoded, splice->next_[recompute_height])) {
|
2016-11-22 22:06:54 +00:00
|
|
|
// key is from after splice
|
|
|
|
if (allow_partial_splice_fix) {
|
|
|
|
Node* bad = splice->next_[recompute_height];
|
|
|
|
while (splice->next_[recompute_height] == bad) {
|
|
|
|
++recompute_height;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
recompute_height = max_height;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// this level brackets the key, we won!
|
2015-08-14 23:59:07 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-11-22 22:06:54 +00:00
|
|
|
assert(recompute_height <= max_height);
|
|
|
|
if (recompute_height > 0) {
|
2018-03-23 19:12:15 +00:00
|
|
|
RecomputeSpliceLevels(key_decoded, splice, recompute_height);
|
2016-11-22 22:06:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool splice_is_valid = true;
|
|
|
|
if (UseCAS) {
|
|
|
|
for (int i = 0; i < height; ++i) {
|
|
|
|
while (true) {
|
2018-02-01 02:45:49 +00:00
|
|
|
// Checking for duplicate keys at level 0 is sufficient
|
|
|
|
if (UNLIKELY(i == 0 && splice->next_[i] != nullptr &&
|
2024-08-27 20:57:40 +00:00
|
|
|
compare_(splice->next_[i]->Key(), key_decoded) <= 0)) {
|
2018-02-01 02:45:49 +00:00
|
|
|
// duplicate key
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (UNLIKELY(i == 0 && splice->prev_[i] != head_ &&
|
2024-08-27 20:57:40 +00:00
|
|
|
compare_(splice->prev_[i]->Key(), key_decoded) >= 0)) {
|
2018-02-01 02:45:49 +00:00
|
|
|
// duplicate key
|
|
|
|
return false;
|
|
|
|
}
|
2016-11-22 22:06:54 +00:00
|
|
|
assert(splice->next_[i] == nullptr ||
|
|
|
|
compare_(x->Key(), splice->next_[i]->Key()) < 0);
|
|
|
|
assert(splice->prev_[i] == head_ ||
|
|
|
|
compare_(splice->prev_[i]->Key(), x->Key()) < 0);
|
|
|
|
x->NoBarrier_SetNext(i, splice->next_[i]);
|
|
|
|
if (splice->prev_[i]->CASNext(i, splice->next_[i], x)) {
|
|
|
|
// success
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// CAS failed, we need to recompute prev and next. It is unlikely
|
|
|
|
// to be helpful to try to use a different level as we redo the
|
|
|
|
// search, because it should be unlikely that lots of nodes have
|
|
|
|
// been inserted between prev[i] and next[i]. No point in using
|
|
|
|
// next[i] as the after hint, because we know it is stale.
|
2018-03-23 19:12:15 +00:00
|
|
|
FindSpliceForLevel<false>(key_decoded, splice->prev_[i], nullptr, i,
|
|
|
|
&splice->prev_[i], &splice->next_[i]);
|
2016-11-22 22:06:54 +00:00
|
|
|
|
|
|
|
// Since we've narrowed the bracket for level i, we might have
|
|
|
|
// violated the Splice constraint between i and i-1. Make sure
|
|
|
|
// we recompute the whole thing next time.
|
|
|
|
if (i > 0) {
|
|
|
|
splice_is_valid = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (int i = 0; i < height; ++i) {
|
|
|
|
if (i >= recompute_height &&
|
|
|
|
splice->prev_[i]->Next(i) != splice->next_[i]) {
|
2018-03-23 19:12:15 +00:00
|
|
|
FindSpliceForLevel<false>(key_decoded, splice->prev_[i], nullptr, i,
|
|
|
|
&splice->prev_[i], &splice->next_[i]);
|
2016-11-22 22:06:54 +00:00
|
|
|
}
|
2018-02-01 02:45:49 +00:00
|
|
|
// Checking for duplicate keys at level 0 is sufficient
|
|
|
|
if (UNLIKELY(i == 0 && splice->next_[i] != nullptr &&
|
2024-08-27 20:57:40 +00:00
|
|
|
compare_(splice->next_[i]->Key(), key_decoded) <= 0)) {
|
2018-02-01 02:45:49 +00:00
|
|
|
// duplicate key
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (UNLIKELY(i == 0 && splice->prev_[i] != head_ &&
|
2024-08-27 20:57:40 +00:00
|
|
|
compare_(splice->prev_[i]->Key(), key_decoded) >= 0)) {
|
2018-02-01 02:45:49 +00:00
|
|
|
// duplicate key
|
|
|
|
return false;
|
|
|
|
}
|
2016-11-22 22:06:54 +00:00
|
|
|
assert(splice->next_[i] == nullptr ||
|
|
|
|
compare_(x->Key(), splice->next_[i]->Key()) < 0);
|
|
|
|
assert(splice->prev_[i] == head_ ||
|
|
|
|
compare_(splice->prev_[i]->Key(), x->Key()) < 0);
|
|
|
|
assert(splice->prev_[i]->Next(i) == splice->next_[i]);
|
|
|
|
x->NoBarrier_SetNext(i, splice->next_[i]);
|
|
|
|
splice->prev_[i]->SetNext(i, x);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (splice_is_valid) {
|
|
|
|
for (int i = 0; i < height; ++i) {
|
|
|
|
splice->prev_[i] = x;
|
|
|
|
}
|
|
|
|
assert(splice->prev_[splice->height_] == head_);
|
|
|
|
assert(splice->next_[splice->height_] == nullptr);
|
|
|
|
for (int i = 0; i < splice->height_; ++i) {
|
|
|
|
assert(splice->next_[i] == nullptr ||
|
|
|
|
compare_(key, splice->next_[i]->Key()) < 0);
|
|
|
|
assert(splice->prev_[i] == head_ ||
|
|
|
|
compare_(splice->prev_[i]->Key(), key) <= 0);
|
|
|
|
assert(splice->prev_[i + 1] == splice->prev_[i] ||
|
|
|
|
splice->prev_[i + 1] == head_ ||
|
|
|
|
compare_(splice->prev_[i + 1]->Key(), splice->prev_[i]->Key()) <
|
|
|
|
0);
|
|
|
|
assert(splice->next_[i + 1] == splice->next_[i] ||
|
|
|
|
splice->next_[i + 1] == nullptr ||
|
|
|
|
compare_(splice->next_[i]->Key(), splice->next_[i + 1]->Key()) <
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
splice->height_ = 0;
|
|
|
|
}
|
2018-02-01 02:45:49 +00:00
|
|
|
return true;
|
2015-11-24 21:01:09 +00:00
|
|
|
}
|
|
|
|
|
2015-11-24 21:29:50 +00:00
|
|
|
template <class Comparator>
|
|
|
|
bool InlineSkipList<Comparator>::Contains(const char* key) const {
|
2024-08-19 20:53:25 +00:00
|
|
|
Node* x = FindGreaterOrEqual(key, nullptr);
|
2015-11-19 22:24:29 +00:00
|
|
|
if (x != nullptr && Equal(key, x->Key())) {
|
2015-11-24 21:01:09 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-13 21:00:52 +00:00
|
|
|
template <class Comparator>
|
|
|
|
void InlineSkipList<Comparator>::TEST_Validate() const {
|
|
|
|
// Iterate over all levels at the same time, and verify that nodes appear
|
|
|
|
// in the right order, and that nodes appearing in an upper level also
|
|
|
|
// appear in lower levels.
|
|
|
|
Node* nodes[kMaxPossibleHeight];
|
|
|
|
int max_height = GetMaxHeight();
|
2017-09-07 21:11:15 +00:00
|
|
|
assert(max_height > 0);
|
2016-11-13 21:00:52 +00:00
|
|
|
for (int i = 0; i < max_height; i++) {
|
|
|
|
nodes[i] = head_;
|
|
|
|
}
|
|
|
|
while (nodes[0] != nullptr) {
|
|
|
|
Node* l0_next = nodes[0]->Next(0);
|
|
|
|
if (l0_next == nullptr) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
assert(nodes[0] == head_ || compare_(nodes[0]->Key(), l0_next->Key()) < 0);
|
|
|
|
nodes[0] = l0_next;
|
|
|
|
|
|
|
|
int i = 1;
|
|
|
|
while (i < max_height) {
|
|
|
|
Node* next = nodes[i]->Next(i);
|
|
|
|
if (next == nullptr) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
auto cmp = compare_(nodes[0]->Key(), next->Key());
|
|
|
|
assert(cmp <= 0);
|
|
|
|
if (cmp == 0) {
|
|
|
|
assert(next == nodes[0]);
|
|
|
|
nodes[i] = next;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int i = 1; i < max_height; i++) {
|
2017-09-07 21:11:15 +00:00
|
|
|
assert(nodes[i] != nullptr && nodes[i]->Next(i) == nullptr);
|
2016-11-13 21:00:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-08-19 20:53:25 +00:00
|
|
|
template <class Comparator>
|
|
|
|
Status InlineSkipList<Comparator>::Corruption(Node* prev, Node* next,
|
|
|
|
bool allow_data_in_errors) {
|
|
|
|
std::string msg = "Out-of-order keys found in skiplist.";
|
|
|
|
if (allow_data_in_errors) {
|
|
|
|
msg.append(" prev key: " + Slice(prev->Key()).ToString(true));
|
|
|
|
msg.append(" next key: " + Slice(next->Key()).ToString(true));
|
|
|
|
}
|
|
|
|
return Status::Corruption(msg);
|
|
|
|
}
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|