#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    from builtins import str
except ImportError:
    from __builtin__ import str
import fnmatch
import json
import os
import sys

from targets_builder import TARGETSBuilder

from util import ColorString

# This script generates a TARGETS file for Buck.
# Buck is a build tool specifying dependencies among different build targets.
# Users can pass extra dependencies as a JSON object via the command line, and
# this script will include those dependencies in the generated TARGETS file.
# Usage:
# $python3 buckifier/buckify_rocksdb.py
# (This generates a TARGETS file without user-specified dependencies for unit
# tests.)
# $python3 buckifier/buckify_rocksdb.py \
#     '{"fake": {
#         "extra_deps": [":test_dep", "//fakes/module:mock1"],
#         "extra_compiler_flags": ["-DFOO_BAR", "-Os"]
#     }
#     }'
# (The generated TARGETS file has test_dep and mock1 as dependencies for RocksDB
# unit tests, and will use the extra_compiler_flags to compile the unit test
# source.)

# tests to export as libraries for inclusion in other projects
_EXPORTED_TEST_LIBS = ["env_basic_test"]


# Parse src.mk file as a dictionary of
# VAR_NAME => list of files
def parse_src_mk(repo_path):
    src_mk = repo_path + "/src.mk"
    src_files = {}
    for line in open(src_mk):
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            continue
        if "=" in line:
            current_src = line.split("=")[0].strip()
            src_files[current_src] = []
        elif ".c" in line:
            src_path = line.split("\\")[0].strip()
            src_files[current_src].append(src_path)
    return src_files
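

# Illustrative sketch (a hypothetical src.mk fragment, not read from disk):
# given
#   LIB_SOURCES = \
#     db/db_impl/db_impl.cc \
#     db/log_writer.cc
# parse_src_mk() would return
#   {"LIB_SOURCES": ["db/db_impl/db_impl.cc", "db/log_writer.cc"]}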


# get all .cc / .c files
def get_cc_files(repo_path):
    cc_files = []
    for root, _dirnames, filenames in os.walk(
        repo_path
    ):  # noqa: B007 T25377293 Grandfathered in
        root = root[(len(repo_path) + 1) :]
        if "java" in root:
            # Skip java
            continue
        for filename in fnmatch.filter(filenames, "*.cc"):
            cc_files.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, "*.c"):
            cc_files.append(os.path.join(root, filename))
    return cc_files
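

# Illustrative sketch: the returned paths are relative to repo_path, e.g.
# ["db/db_impl/db_impl.cc", "util/coding.c"] for a hypothetical tree; any
# directory whose path contains "java" is skipped.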


# Get non_parallel tests from Makefile
def get_non_parallel_tests(repo_path):
    Makefile = repo_path + "/Makefile"

    s = set({})

    found_non_parallel_tests = False
    for line in open(Makefile):
        line = line.strip()
        if line.startswith("NON_PARALLEL_TEST ="):
            found_non_parallel_tests = True
        elif found_non_parallel_tests:
            if line.endswith("\\"):
                # remove the trailing \
                line = line[:-1]
                line = line.strip()
                s.add(line)
            else:
                # we consumed all the non_parallel tests
                break

    return s
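

# Illustrative sketch (a hypothetical Makefile fragment): given
#   NON_PARALLEL_TEST = \
#     c_test \
#     env_test \
# followed by a line without a trailing backslash (which ends the block),
# get_non_parallel_tests() would return {"c_test", "env_test"}.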


# Parse extra dependencies passed by user from command line
def get_dependencies():
    deps_map = {"": {"extra_deps": [], "extra_compiler_flags": []}}
    if len(sys.argv) < 2:
        return deps_map

    def encode_dict(data):
        rv = {}
        for k, v in data.items():
            if isinstance(v, dict):
                v = encode_dict(v)
            rv[k] = v
        return rv

    extra_deps = json.loads(sys.argv[1], object_hook=encode_dict)
    for target_alias, deps in extra_deps.items():
        deps_map[target_alias] = deps
    return deps_map
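

# Illustrative sketch: for the usage example in the file header, this returns
#   {"": {"extra_deps": [], "extra_compiler_flags": []},
#    "fake": {"extra_deps": [":test_dep", "//fakes/module:mock1"],
#             "extra_compiler_flags": ["-DFOO_BAR", "-Os"]}}
# where "" is the default, dependency-free target configuration.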


# Prepare TARGETS file for buck
def generate_targets(repo_path, deps_map):
    print(ColorString.info("Generating TARGETS"))
    # parsed src.mk file
    src_mk = parse_src_mk(repo_path)
    # get all .cc files
    cc_files = get_cc_files(repo_path)
    # get non_parallel tests from Makefile
    non_parallel_tests = get_non_parallel_tests(repo_path)

    if src_mk is None or cc_files is None or non_parallel_tests is None:
        return False

    extra_argv = ""
    if len(sys.argv) >= 2:
        # Heuristically quote and canonicalize whitespace for inclusion
        # in the record of how the file was generated.
        extra_argv = " '{0}'".format(" ".join(sys.argv[1].split()))

    TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path, extra_argv)

    # rocksdb_lib
    TARGETS.add_library(
        "rocksdb_lib",
        src_mk["LIB_SOURCES"] +
        # always add range_tree; it's only excluded on ppc64, which we don't use internally
        src_mk["RANGE_TREE_SOURCES"] + src_mk["TOOL_LIB_SOURCES"],
        deps=[
            "//folly/container:f14_hash",
            "//folly/experimental/coro:blocking_wait",
            "//folly/experimental/coro:collect",
            "//folly/experimental/coro:coroutine",
            "//folly/experimental/coro:task",
            "//folly/synchronization:distributed_mutex",
        ],
    )
    # rocksdb_whole_archive_lib
    TARGETS.add_library(
        "rocksdb_whole_archive_lib",
        [],
        deps=[
            ":rocksdb_lib",
        ],
        headers=None,
        extra_external_deps="",
        link_whole=True,
    )
    # rocksdb_test_lib
    TARGETS.add_library(
        "rocksdb_test_lib",
        src_mk.get("MOCK_LIB_SOURCES", [])
        + src_mk.get("TEST_LIB_SOURCES", [])
        + src_mk.get("EXP_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", []),
        [":rocksdb_lib"],
        extra_test_libs=True,
    )
    # rocksdb_tools_lib
    TARGETS.add_library(
        "rocksdb_tools_lib",
        src_mk.get("BENCH_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
        [":rocksdb_lib"],
    )
    # rocksdb_cache_bench_tools_lib
    TARGETS.add_library(
        "rocksdb_cache_bench_tools_lib",
        src_mk.get("CACHE_BENCH_LIB_SOURCES", []),
        [":rocksdb_lib"],
    )
    # rocksdb_stress_lib
    TARGETS.add_rocksdb_library(
        "rocksdb_stress_lib",
        src_mk.get("ANALYZER_LIB_SOURCES", [])
        + src_mk.get("STRESS_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
    )
    # ldb binary
    TARGETS.add_binary("ldb", ["tools/ldb.cc"], [":rocksdb_tools_lib"])
    # db_stress binary
    TARGETS.add_binary(
        "db_stress", ["db_stress_tool/db_stress.cc"], [":rocksdb_stress_lib"]
    )
    # db_bench binary
    TARGETS.add_binary("db_bench", ["tools/db_bench.cc"], [":rocksdb_tools_lib"])
    # cache_bench binary
    TARGETS.add_binary(
        "cache_bench", ["cache/cache_bench.cc"], [":rocksdb_cache_bench_tools_lib"]
    )
    # bench binaries
    for src in src_mk.get("MICROBENCH_SOURCES", []):
        # e.g. "microbench/db_basic_bench.cc" -> "db_basic_bench"
        name = src.rsplit("/", 1)[1].split(".")[0] if "/" in src else src.split(".")[0]
        TARGETS.add_binary(name, [src], [], extra_bench_libs=True)
    print("Extra dependencies:\n{0}".format(json.dumps(deps_map)))

    # Dictionary test executable name -> relative source file path
    test_source_map = {}

    # c_test.c is added through TARGETS.add_c_test(). If there
    # is more than one .c test file, we need to extend
    # TARGETS.add_c_test() to include other C tests too.
    for test_src in src_mk.get("TEST_MAIN_SOURCES_C", []):
        if test_src != "db/c_test.c":
            print("Don't know how to deal with " + test_src)
            return False
    TARGETS.add_c_test()

    try:
        with open(f"{repo_path}/buckifier/bench.json") as json_file:
            fast_fancy_bench_config_list = json.load(json_file)
            for config_dict in fast_fancy_bench_config_list:
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
                TARGETS.add_fancy_bench_config(
                    config_dict["name"],
                    clean_benchmarks,
                    False,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )
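
        # Illustrative sketch (a hypothetical bench.json entry; only the keys
        # consumed above are shown). Non-dict metrics are kept, dict-valued
        # overloads are dropped:
        #   [{"name": "read_bench",
        #     "benchmarks": {"db_basic_bench": {"DBGet": ["real_time", {"agg": "max"}]}},
        #     "expected_runtime_one_iter": 900,
        #     "sl_iterations": 3,
        #     "regression_threshold": 10}]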

        with open(f"{repo_path}/buckifier/bench-slow.json") as json_file:
            slow_fancy_bench_config_list = json.load(json_file)
            for config_dict in slow_fancy_bench_config_list:
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
                # register each slow config with its own cleaned benchmarks
                TARGETS.add_fancy_bench_config(
                    config_dict["name"] + "_slow",
                    clean_benchmarks,
                    True,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )
    # It is better that servicelab experiments break
    # than RocksDB's GitHub CI.
    except Exception:
        pass

    TARGETS.add_test_header()

    for test_src in src_mk.get("TEST_MAIN_SOURCES", []):
        # strip the extension and any directory prefix to get the test name
        test = test_src.split(".c")[0].strip().split("/")[-1].strip()
        test_source_map[test] = test_src
        print(test + " " + test_src)

    for target_alias, deps in deps_map.items():
        for test, test_src in sorted(test_source_map.items()):
            if len(test) == 0:
                print(ColorString.warning("Failed to get test name for %s" % test_src))
                continue

            # e.g. alias "fake" turns "env_basic_test" into "env_basic_test_fake"
            test_target_name = test if not target_alias else test + "_" + target_alias

            if test in _EXPORTED_TEST_LIBS:
                test_library = "%s_lib" % test_target_name
                TARGETS.add_library(
                    test_library,
                    [test_src],
                    deps=[":rocksdb_test_lib"],
                    extra_test_libs=True,
                )
                TARGETS.register_test(
                    test_target_name,
                    test_src,
                    deps=json.dumps(deps["extra_deps"] + [":" + test_library]),
                    extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                )
            else:
                TARGETS.register_test(
                    test_target_name,
                    test_src,
                    deps=json.dumps(deps["extra_deps"] + [":rocksdb_test_lib"]),
                    extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                )
    TARGETS.export_file("tools/db_crashtest.py")

    print(ColorString.info("Generated TARGETS Summary:"))
    print(ColorString.info("- %d libs" % TARGETS.total_lib))
    print(ColorString.info("- %d binaries" % TARGETS.total_bin))
    print(ColorString.info("- %d tests" % TARGETS.total_test))
    return True


def get_rocksdb_path():
    # rocksdb = {script_dir}/..
    script_dir = os.path.dirname(sys.argv[0])
    script_dir = os.path.abspath(script_dir)
    rocksdb_path = os.path.abspath(os.path.join(script_dir, "../"))

    return rocksdb_path


def exit_with_error(msg):
    print(ColorString.error(msg))
    sys.exit(1)


def main():
    deps_map = get_dependencies()
    # Generate TARGETS file for buck
    ok = generate_targets(get_rocksdb_path(), deps_map)
    if not ok:
        exit_with_error("Failed to generate TARGETS files")


if __name__ == "__main__":
    main()