#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# REQUIRE: db_bench binary exists in the current directory

# Exit Codes
EXIT_INVALID_ARGS=1
EXIT_NOT_COMPACTION_TEST=2
EXIT_UNKNOWN_JOB=3

# Size Constants
K=1024
M=$((1024 * K))
G=$((1024 * M))
T=$((1024 * G))
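# For example: $(( 16 * $G )) used below expands to 17179869184 bytes (16 GiB),
# the default block cache size.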

function display_usage() {
  echo "usage: benchmark.sh [--help] <test>"
  echo ""
  echo "These are the available benchmark tests:"
  echo -e "\tbulkload"
  echo -e "\tfillseq_disable_wal\t\tSequentially fill the database with no WAL"
  echo -e "\tfillseq_enable_wal\t\tSequentially fill the database with WAL"
  echo -e "\toverwrite"
  echo -e "\tupdaterandom"
  echo -e "\treadrandom"
  echo -e "\tmergerandom"
  echo -e "\tfilluniquerandom"
  echo -e "\tmultireadrandom"
  echo -e "\tfwdrange"
  echo -e "\trevrange"
  echo -e "\treadwhilewriting"
  echo -e "\treadwhilemerging"
  echo -e "\tfwdrangewhilewriting"
  echo -e "\trevrangewhilewriting"
  echo -e "\tfwdrangewhilemerging"
  echo -e "\trevrangewhilemerging"
  echo -e "\trandomtransaction"
  echo -e "\tuniversal_compaction"
  echo -e "\tdebug"
  echo ""
  echo "Generic environment variables:"
  echo -e "\tJOB_ID\t\t\t\tAn identifier for the benchmark job, will appear in the results"
  echo -e "\tDB_DIR\t\t\t\tPath to write the database data directory"
  echo -e "\tWAL_DIR\t\t\t\tPath to write the database WAL directory"
  echo -e "\tOUTPUT_DIR\t\t\tPath to write the benchmark results to (default: /tmp)"
  echo -e "\tNUM_KEYS\t\t\tThe number of keys to use in the benchmark"
  echo -e "\tKEY_SIZE\t\t\tThe size of the keys to use in the benchmark (default: 20 bytes)"
  echo -e "\tVALUE_SIZE\t\t\tThe size of the values to use in the benchmark (default: 400 bytes)"
  echo -e "\tBLOCK_SIZE\t\t\tThe size of the database blocks in the benchmark (default: 8 KB)"
  echo -e "\tDB_BENCH_NO_SYNC\t\tDisable fsync on the WAL"
  echo -e "\tNUMACTL\t\t\t\tWhen defined use numactl --interleave=all"
  echo -e "\tNUM_THREADS\t\t\tThe number of threads to use (default: 64)"
  echo -e "\tMB_WRITE_PER_SEC\t\tRate limit for background writer (default: 0, no limit)"
  echo -e "\tNUM_NEXTS_PER_SEEK\t\tNumber of Next calls per Seek in range scan tests (default: 10)"
  echo -e "\tCACHE_SIZE\t\t\tSize of the block cache (default: 16GB)"
  echo -e "\tCACHE_NUMSHARDBITS\t\tNumber of shards for the block cache is 2 ** cache_numshardbits (default: 6)"
  echo -e "\tCOMPRESSION_MAX_DICT_BYTES\tValue for compression_max_dict_bytes (default: 0)"
  echo -e "\tCOMPRESSION_TYPE\t\tDefault compression type (default: zstd)"
  echo -e "\tBOTTOMMOST_COMPRESSION\t\tCompression for the bottommost level (default: none)"
  echo -e "\tMIN_LEVEL_TO_COMPRESS\t\tValue for min_level_to_compress for Leveled"
  echo -e "\tCOMPRESSION_SIZE_PERCENT\tValue for compression_size_percent for Universal"
  echo -e "\tDURATION\t\t\tNumber of seconds for which the test runs"
  echo -e "\tWRITES\t\t\t\tNumber of writes for which the test runs"
  echo -e "\tWRITE_BUFFER_SIZE_MB\t\tThe size of the write buffer in MB (default: 128)"
  echo -e "\tTARGET_FILE_SIZE_BASE_MB\tThe value for target_file_size_base in MB (default: 128)"
  echo -e "\tMAX_BYTES_FOR_LEVEL_BASE_MB\tThe value for max_bytes_for_level_base in MB (default: 1024)"
  echo -e "\tMAX_BACKGROUND_JOBS\t\tThe value for max_background_jobs (default: 16)"
  echo -e "\tCACHE_INDEX_AND_FILTER_BLOCKS\tThe value for cache_index_and_filter_blocks (default: 0)"
  echo -e "\tUSE_O_DIRECT\t\t\tUse O_DIRECT for user reads and compaction"
  echo -e "\tBYTES_PER_SYNC\t\t\tValue for bytes_per_sync, set to zero when USE_O_DIRECT is true"
  echo -e "\tSTATS_INTERVAL_SECONDS\t\tValue for stats_interval_seconds"
  echo -e "\tREPORT_INTERVAL_SECONDS\t\tValue for report_interval_seconds"
  echo -e "\tSUBCOMPACTIONS\t\t\tValue for subcompactions"
  echo -e "\tCOMPACTION_STYLE\t\tOne of leveled, universal, blob. Default is leveled."
  echo -e "\nEnvironment variables (mostly) for leveled compaction:"
  echo -e "\tLEVEL0_FILE_NUM_COMPACTION_TRIGGER\t\tValue for level0_file_num_compaction_trigger"
  echo -e "\tLEVEL0_SLOWDOWN_WRITES_TRIGGER\t\t\tValue for level0_slowdown_writes_trigger"
  echo -e "\tLEVEL0_STOP_WRITES_TRIGGER\t\t\tValue for level0_stop_writes_trigger"
  echo -e "\tPER_LEVEL_FANOUT\t\t\t\tValue for max_bytes_for_level_multiplier"
  echo -e "\tSOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB\tThe value for soft_pending_compaction_bytes_limit in GB"
  echo -e "\tHARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB\tThe value for hard_pending_compaction_bytes_limit in GB"
  echo -e "\nEnvironment variables for universal compaction:"
  echo -e "\tUNIVERSAL_MIN_MERGE_WIDTH\tValue of min_merge_width option for universal"
  echo -e "\tUNIVERSAL_MAX_MERGE_WIDTH\tValue of max_merge_width option for universal"
  echo -e "\tUNIVERSAL_SIZE_RATIO\t\tValue of size_ratio option for universal"
  echo -e "\tUNIVERSAL_MAX_SIZE_AMP\t\tmax_size_amplification_percent for universal"
  echo -e "\tUNIVERSAL_ALLOW_TRIVIAL_MOVE\tSet allow_trivial_move to true for universal, default is false"
  echo -e "\nOptions for integrated BlobDB:"
  echo -e "\tMIN_BLOB_SIZE\tValue for min_blob_size"
  echo -e "\tBLOB_FILE_SIZE\tValue for blob_file_size"
  echo -e "\tBLOB_COMPRESSION_TYPE\tValue for blob_compression_type"
  echo -e "\tBLOB_GC_AGE_CUTOFF\tValue for blob_garbage_collection_age_cutoff"
  echo -e "\tBLOB_GC_FORCE_THRESHOLD\tValue for blob_garbage_collection_force_threshold"
  echo -e "\tBLOB_FILE_STARTING_LEVEL\t\tBlob file starting level (default: 0)"
  echo -e "\tUSE_BLOB_CACHE\t\t\tEnable blob cache (default: 1)"
  echo -e "\tUSE_SHARED_BLOCK_AND_BLOB_CACHE\t\t\tUse the same backing cache for block cache and blob cache (default: 1)"
  echo -e "\tBLOB_CACHE_SIZE\t\t\tSize of the blob cache (default: 16GB)"
  echo -e "\tBLOB_CACHE_NUMSHARDBITS\t\t\tNumber of shards for the blob cache is 2 ** blob_cache_numshardbits (default: 6)"
  echo -e "\tPREPOPULATE_BLOB_CACHE\t\t\tPre-populate hot/warm blobs in blob cache (default: 0)"
}
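
# Example invocation (illustrative paths and sizes):
#   DB_DIR=/data/rocksdb WAL_DIR=/data/rocksdb/wal OUTPUT_DIR=/data/results \
#   NUM_KEYS=100000000 NUM_THREADS=16 ./benchmark.sh readwhilewriting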

if [ $# -lt 1 ]; then
  display_usage
  exit $EXIT_INVALID_ARGS
fi
bench_cmd=$1
shift
bench_args=$*

if [[ "$bench_cmd" == "--help" ]]; then
  display_usage
  exit
fi

job_id=${JOB_ID}

# Make it easier to run only the compaction test. Getting valid data requires
# a number of iterations and having an ability to run the test separately from
# rest of the benchmarks helps.
if [ "$COMPACTION_TEST" == "1" -a "$bench_cmd" != "universal_compaction" ]; then
  echo "Skipping $bench_cmd because it's not a compaction test."
  exit $EXIT_NOT_COMPACTION_TEST
fi

if [ -z $DB_DIR ]; then
  echo "DB_DIR is not defined"
  exit $EXIT_INVALID_ARGS
fi

if [ -z $WAL_DIR ]; then
  echo "WAL_DIR is not defined"
  exit $EXIT_INVALID_ARGS
fi

output_dir=${OUTPUT_DIR:-/tmp}
if [ ! -d $output_dir ]; then
  mkdir -p $output_dir
fi

report="$output_dir/report.tsv"
schedule="$output_dir/schedule.txt"

# all multithreaded tests run with sync=1 unless
# $DB_BENCH_NO_SYNC is defined
syncval="1"
if [ ! -z $DB_BENCH_NO_SYNC ]; then
  echo "Turning sync off for all multithreaded tests"
  syncval="0";
fi

compaction_style=${COMPACTION_STYLE:-leveled}
if [ $compaction_style = "leveled" ]; then
  echo Use leveled compaction
elif [ $compaction_style = "universal" ]; then
  echo Use universal compaction
elif [ $compaction_style = "blob" ]; then
  echo Use blob compaction
else
  echo COMPACTION_STYLE is :: $COMPACTION_STYLE :: and must be one of leveled, universal, blob
  exit $EXIT_INVALID_ARGS
fi
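
# For example: COMPACTION_STYLE=blob ./benchmark.sh fillseq_disable_wal selects
# the integrated BlobDB parameters defined below.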

num_threads=${NUM_THREADS:-64}
mb_written_per_sec=${MB_WRITE_PER_SEC:-0}
# Only for tests that do range scans
num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10}
cache_size=${CACHE_SIZE:-$(( 16 * $G ))}
cache_numshardbits=${CACHE_NUMSHARDBITS:-6}
compression_max_dict_bytes=${COMPRESSION_MAX_DICT_BYTES:-0}
compression_type=${COMPRESSION_TYPE:-zstd}
min_level_to_compress=${MIN_LEVEL_TO_COMPRESS:-"-1"}
compression_size_percent=${COMPRESSION_SIZE_PERCENT:-"-1"}

duration=${DURATION:-0}
writes=${WRITES:-0}

num_keys=${NUM_KEYS:-8000000000}
key_size=${KEY_SIZE:-20}
value_size=${VALUE_SIZE:-400}
block_size=${BLOCK_SIZE:-8192}
write_buffer_mb=${WRITE_BUFFER_SIZE_MB:-128}
target_file_mb=${TARGET_FILE_SIZE_BASE_MB:-128}
l1_mb=${MAX_BYTES_FOR_LEVEL_BASE_MB:-1024}
max_background_jobs=${MAX_BACKGROUND_JOBS:-16}
stats_interval_seconds=${STATS_INTERVAL_SECONDS:-60}
report_interval_seconds=${REPORT_INTERVAL_SECONDS:-1}
subcompactions=${SUBCOMPACTIONS:-1}
per_level_fanout=${PER_LEVEL_FANOUT:-8}

cache_index_and_filter=${CACHE_INDEX_AND_FILTER_BLOCKS:-0}
if [[ $cache_index_and_filter -eq 0 ]]; then
  cache_meta_flags=""
elif [[ $cache_index_and_filter -eq 1 ]]; then
  cache_meta_flags="\
    --cache_index_and_filter_blocks=$cache_index_and_filter \
    --cache_high_pri_pool_ratio=0.5 --cache_low_pri_pool_ratio=0"
else
  echo CACHE_INDEX_AND_FILTER_BLOCKS was $CACHE_INDEX_AND_FILTER_BLOCKS but must be 0 or 1
  exit $EXIT_INVALID_ARGS
fi

soft_pending_arg=""
if [ ! -z $SOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB ]; then
  soft_pending_bytes=$( echo $SOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB | \
    awk '{ printf "%.0f", $1 * GB }' GB=$G )
  soft_pending_arg="--soft_pending_compaction_bytes_limit=$soft_pending_bytes"
fi

hard_pending_arg=""
if [ ! -z $HARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB ]; then
  hard_pending_bytes=$( echo $HARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB | \
    awk '{ printf "%.0f", $1 * GB }' GB=$G )
  hard_pending_arg="--hard_pending_compaction_bytes_limit=$hard_pending_bytes"
fi
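
# For example: SOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB=64 becomes
# --soft_pending_compaction_bytes_limit=68719476736 (64 * 2^30 bytes).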

o_direct_flags=""
if [ ! -z $USE_O_DIRECT ]; then
  # Some of these flags are only supported in new versions and --undefok makes that work
  o_direct_flags="--use_direct_reads --use_direct_io_for_flush_and_compaction --prepopulate_block_cache=1"
  bytes_per_sync=0
else
  bytes_per_sync=${BYTES_PER_SYNC:-$(( 1 * M ))}
fi

univ_min_merge_width=${UNIVERSAL_MIN_MERGE_WIDTH:-2}
univ_max_merge_width=${UNIVERSAL_MAX_MERGE_WIDTH:-20}
univ_size_ratio=${UNIVERSAL_SIZE_RATIO:-1}
univ_max_size_amp=${UNIVERSAL_MAX_SIZE_AMP:-200}

if [ ! -z $UNIVERSAL_ALLOW_TRIVIAL_MOVE ]; then
  univ_allow_trivial_move=1
else
  univ_allow_trivial_move=0
fi

min_blob_size=${MIN_BLOB_SIZE:-0}
blob_file_size=${BLOB_FILE_SIZE:-$(( 256 * $M ))}
blob_compression_type=${BLOB_COMPRESSION_TYPE:-${compression_type}}
blob_gc_age_cutoff=${BLOB_GC_AGE_CUTOFF:-"0.25"}
blob_gc_force_threshold=${BLOB_GC_FORCE_THRESHOLD:-1}
blob_file_starting_level=${BLOB_FILE_STARTING_LEVEL:-0}
use_blob_cache=${USE_BLOB_CACHE:-1}
use_shared_block_and_blob_cache=${USE_SHARED_BLOCK_AND_BLOB_CACHE:-1}
blob_cache_size=${BLOB_CACHE_SIZE:-$(( 16 * $G ))}
blob_cache_numshardbits=${BLOB_CACHE_NUMSHARDBITS:-6}
prepopulate_blob_cache=${PREPOPULATE_BLOB_CACHE:-0}

# This script still works back to RocksDB 6.0
undef_params="\
use_blob_cache,\
use_shared_block_and_blob_cache,\
blob_cache_size,blob_cache_numshardbits,\
prepopulate_blob_cache,\
multiread_batched,\
cache_low_pri_pool_ratio,\
prepopulate_block_cache"
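
# Flags listed in --undefok can be passed to a db_bench binary that does not
# define them (e.g. --use_blob_cache on an older release) without triggering
# an unknown-flag error.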

const_params_base="
  --undefok=$undef_params \
  --db=$DB_DIR \
  --wal_dir=$WAL_DIR \
  \
  --num=$num_keys \
  --key_size=$key_size \
  --value_size=$value_size \
  --block_size=$block_size \
  --cache_size=$cache_size \
  --cache_numshardbits=$cache_numshardbits \
  --compression_max_dict_bytes=$compression_max_dict_bytes \
  --compression_ratio=0.5 \
  --compression_type=$compression_type \
  --bytes_per_sync=$bytes_per_sync \
  $cache_meta_flags \
  $o_direct_flags \
  --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \
  \
  --write_buffer_size=$(( $write_buffer_mb * M)) \
  --target_file_size_base=$(( $target_file_mb * M)) \
  --max_bytes_for_level_base=$(( $l1_mb * M)) \
  \
  --verify_checksum=1 \
  --delete_obsolete_files_period_micros=$((60 * M)) \
  --max_bytes_for_level_multiplier=$per_level_fanout \
  \
  --statistics=0 \
  --stats_per_interval=1 \
  --stats_interval_seconds=$stats_interval_seconds \
  --report_interval_seconds=$report_interval_seconds \
  --histogram=1 \
  \
  --memtablerep=skip_list \
  --bloom_bits=10 \
  --open_files=-1 \
  --subcompactions=$subcompactions \
  \
  $bench_args"

level_const_params="
  $const_params_base \
  --compaction_style=0 \
  --num_levels=8 \
  --min_level_to_compress=$min_level_to_compress \
  --level_compaction_dynamic_level_bytes=true \
  --pin_l0_filter_and_index_blocks_in_cache=1 \
  $soft_pending_arg \
  $hard_pending_arg \
"

# These inherit level_const_params because the non-blob LSM tree uses leveled compaction.
blob_const_params="
  $level_const_params \
  --enable_blob_files=true \
  --min_blob_size=$min_blob_size \
  --blob_file_size=$blob_file_size \
  --blob_compression_type=$blob_compression_type \
  --enable_blob_garbage_collection=true \
  --blob_garbage_collection_age_cutoff=$blob_gc_age_cutoff \
  --blob_garbage_collection_force_threshold=$blob_gc_force_threshold \
  --blob_file_starting_level=$blob_file_starting_level \
  --use_blob_cache=$use_blob_cache \
  --use_shared_block_and_blob_cache=$use_shared_block_and_blob_cache \
  --blob_cache_size=$blob_cache_size \
  --blob_cache_numshardbits=$blob_cache_numshardbits \
  --prepopulate_blob_cache=$prepopulate_blob_cache \
"

# TODO:
# pin_l0_filter_and..., is this OK?
univ_const_params="
  $const_params_base \
  --compaction_style=1 \
  --num_levels=40 \
  --universal_compression_size_percent=$compression_size_percent \
  --pin_l0_filter_and_index_blocks_in_cache=1 \
  --universal_min_merge_width=$univ_min_merge_width \
  --universal_max_merge_width=$univ_max_merge_width \
  --universal_size_ratio=$univ_size_ratio \
  --universal_max_size_amplification_percent=$univ_max_size_amp \
  --universal_allow_trivial_move=$univ_allow_trivial_move \
"

if [ $compaction_style == "leveled" ]; then
  const_params="$level_const_params"
  l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-4}
  l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
  l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
elif [ $compaction_style == "universal" ]; then
  const_params="$univ_const_params"
  l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-8}
  l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
  l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
else
  # compaction_style == "blob"
  const_params="$blob_const_params"
  l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-4}
  l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
  l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
fi

l0_config="
  --level0_file_num_compaction_trigger=$l0_file_num_compaction_trigger \
  --level0_slowdown_writes_trigger=$l0_slowdown_writes_trigger \
  --level0_stop_writes_trigger=$l0_stop_writes_trigger"

# You probably don't want to set both --writes and --duration
if [ $duration -gt 0 ]; then
  const_params="$const_params --duration=$duration"
fi
if [ $writes -gt 0 ]; then
  const_params="$const_params --writes=$writes"
fi

params_w="$l0_config \
  --max_background_jobs=$max_background_jobs \
  --max_write_buffer_number=8 \
  $const_params"

params_bulkload="--max_background_jobs=$max_background_jobs \
  --max_write_buffer_number=8 \
  --allow_concurrent_memtable_write=false \
  --level0_file_num_compaction_trigger=$((10 * M)) \
  --level0_slowdown_writes_trigger=$((10 * M)) \
  --level0_stop_writes_trigger=$((10 * M)) \
  $const_params "

params_fillseq="--allow_concurrent_memtable_write=false \
  $params_w "

#
# Tune values for level and universal compaction.
# For universal compaction, these level0_* options bound the total number of
# sorted runs in the LSM tree. For level-based compaction, they bound the
# number of L0 files.
#
params_level_compact="$const_params \
  --max_background_flushes=4 \
  --max_write_buffer_number=4 \
  --level0_file_num_compaction_trigger=4 \
  --level0_slowdown_writes_trigger=16 \
  --level0_stop_writes_trigger=20"

params_univ_compact="$const_params \
  --max_background_flushes=4 \
  --max_write_buffer_number=4 \
  --level0_file_num_compaction_trigger=8 \
  --level0_slowdown_writes_trigger=16 \
  --level0_stop_writes_trigger=20"

tsv_header="ops_sec\tmb_sec\tlsm_sz\tblob_sz\tc_wgb\tw_amp\tc_mbps\tc_wsecs\tc_csecs\tb_rgb\tb_wgb\tusec_op\tp50\tp99\tp99.9\tp99.99\tpmax\tuptime\tstall%\tNstall\tu_cpu\ts_cpu\trss\ttest\tdate\tversion\tjob_id\tgithash"
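
# summarize_result below appends one row per test, using this header, to
# $output_dir/report.tsv.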

function get_cmd() {
  output=$1

  numa=""
  if [ ! -z $NUMACTL ]; then
    numa="numactl --interleave=all "
  fi

  # Try to use timeout when duration is set because some tests (revrange*) hang
  # for some versions (v6.10, v6.11).
  timeout_cmd=""
  if [ $duration -gt 0 ]; then
    if hash timeout ; then
      timeout_cmd="timeout $(( $duration + 600 ))"
    fi
  fi

  echo "/usr/bin/time -f '%e %U %S' -o $output $numa $timeout_cmd"
}
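
# For example (illustrative): with NUMACTL set and DURATION=1800,
# "get_cmd bench.log.time" prints:
#   /usr/bin/time -f '%e %U %S' -o bench.log.time numactl --interleave=all timeout 2400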

function month_to_num() {
  local date_str=$1
  date_str="${date_str/Jan/01}"
  date_str="${date_str/Feb/02}"
  date_str="${date_str/Mar/03}"
  date_str="${date_str/Apr/04}"
  date_str="${date_str/May/05}"
  date_str="${date_str/Jun/06}"
  date_str="${date_str/Jul/07}"
  date_str="${date_str/Aug/08}"
  date_str="${date_str/Sep/09}"
  date_str="${date_str/Oct/10}"
  date_str="${date_str/Nov/11}"
  date_str="${date_str/Dec/12}"
  echo $date_str
}
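
# For example: month_to_num "2022-Jun-16T11:38:12" prints "2022-06-16T11:38:12".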

function start_stats {
  output=$1
  iostat -y -mx 1 >& $output.io &
  vmstat 1 >& $output.vm &
  # tail -1 because "ps | grep db_bench" returns 2 entries and we want the second
  while :; do ps aux | grep db_bench | grep -v grep | tail -1; sleep 10; done >& $output.ps &
  # This sets a global value
  pspid=$!

  while :; do
    b_gb=$( ls -l $DB_DIR 2> /dev/null | grep blob | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
    s_gb=$( ls -l $DB_DIR 2> /dev/null | grep sst | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
    l_gb=$( ls -l $WAL_DIR 2> /dev/null | grep log | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
    a_gb=$( ls -l $DB_DIR 2> /dev/null | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
    ts=$( date +%H%M%S )
    echo -e "${a_gb}\t${s_gb}\t${l_gb}\t${b_gb}\t${ts}"
    sleep 10
  done >& $output.sizes &
  # This sets a global value
  szpid=$!
}

function stop_stats {
  output=$1
  kill $pspid
  kill $szpid
  killall iostat
  killall vmstat
  sleep 1
  gzip $output.io
  gzip $output.vm

  am=$( sort -nk 1,1 $output.sizes | tail -1 | awk '{ print $1 }' )
  sm=$( sort -nk 2,2 $output.sizes | tail -1 | awk '{ print $2 }' )
  lm=$( sort -nk 3,3 $output.sizes | tail -1 | awk '{ print $3 }' )
  bm=$( sort -nk 4,4 $output.sizes | tail -1 | awk '{ print $4 }' )
  echo -e "max sizes (GB): $am all, $sm sst, $lm log, $bm blob" >> $output.sizes
}

function units_as_gb {
  size=$1
  units=$2

  case $units in
    MB)
      echo "$size" | awk '{ printf "%.1f", $1 / 1024.0 }'
      ;;
    GB)
      echo "$size"
      ;;
    TB)
      echo "$size" | awk '{ printf "%.1f", $1 * 1024.0 }'
      ;;
    *)
      echo "NA"
      ;;
  esac
}
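
# For example: units_as_gb 512.0 MB prints 0.5, units_as_gb 1.5 TB prints
# 1536.0, and an unrecognized unit prints NA.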

function summarize_result {
  test_out=$1
  test_name=$2
  bench_name=$3

  # In recent versions these can be found directly via db_bench --version, --build_info but
  # grepping from the log lets this work on older versions.
  version="$( grep "RocksDB version:" "$DB_DIR"/LOG | head -1 | awk '{ printf "%s", $5 }' )"
  git_hash="$( grep "Git sha" "$DB_DIR"/LOG | head -1 | awk '{ printf "%s", substr($5, 1, 10) }' )"

  # Note that this function assumes that the benchmark executes long enough so
  # that "Compaction Stats" is written to stdout at least once. If it won't
  # happen then empty output from grep when searching for "Sum" will cause
  # syntax errors.
  date=$( grep ^Date: $test_out | awk '{ print $6 "-" $3 "-" $4 "T" $5 }' )
  my_date=$( month_to_num $date )
  uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' )
  stall_pct=$( grep "^Cumulative stall" $test_out | tail -1 | awk '{ print $5 }' )
  nstall=$( grep ^Stalls\(count\): $test_out | tail -1 | awk '{ print $2 + $6 + $10 + $14 + $18 + $20 }' )

  if ! grep ^"$bench_name" "$test_out" > /dev/null 2>&1 ; then
    echo -e "failed\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t$test_name\t$my_date\t$version\t$job_id\t$git_hash"
    return
  fi

  # Output formats
  # V1: readrandom : 10.218 micros/op 3131616 ops/sec; 1254.3 MB/s (176144999 of 176144999 found)
  # The MB/s is missing for multireadrandom
  # V1a: multireadrandom : 10.164 micros/op 3148272 ops/sec; (177099990 of 177099990 found)
  # V1: overwrite : 7.939 micros/op 125963 ops/sec; 50.5 MB/s
  # V2: overwrite : 7.854 micros/op 127320 ops/sec 1800.001 seconds 229176999 operations; 51.0 MB/s

  format_version=$( grep ^"$bench_name" "$test_out" \
    | awk '{ if (NF >= 10 && $8 == "seconds") { print "V2" } else { print "V1" } }' )
  if [ $format_version == "V1" ]; then
    ops_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $5 }' )
    usecs_op=$( grep ^"$bench_name" "$test_out" | awk '{ printf "%.1f", $3 }' )
    if [ "$bench_name" == "multireadrandom" ]; then
      mb_sec="NA"
    else
      mb_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $7 }' )
    fi
  else
    ops_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $5 }' )
    usecs_op=$( grep ^"$bench_name" "$test_out" | awk '{ printf "%.1f", $3 }' )
    mb_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $11 }' )
  fi

  # For RocksDB version 4.x there are fewer fields but this still parses correctly
  # Cumulative writes: 242M writes, 242M keys, 18M commit groups, 12.9 writes per commit group, ingest: 95.96 GB, 54.69 MB/s
  cum_writes_gb_orig=$( grep "^Cumulative writes" "$test_out" | tail -1 | awk '{ for (x=1; x<=NF; x++) { if ($x == "ingest:") { printf "%.1f", $(x+1) } } }' )
  cum_writes_units=$( grep "^Cumulative writes" "$test_out" | tail -1 | awk '{ for (x=1; x<=NF; x++) { if ($x == "ingest:") { print $(x+2) } } }' | sed 's/,//g' )
  cum_writes_gb=$( units_as_gb "$cum_writes_gb_orig" "$cum_writes_units" )

  # Cumulative compaction: 1159.74 GB write, 661.03 MB/s write, 1108.89 GB read, 632.04 MB/s read, 6284.3 seconds
  cmb_ps=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ printf "%.1f", $6 }' )
  sum_wgb_orig=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ printf "%.1f", $3 }' )
  sum_wgb_units=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ print $4 }' )
  sum_wgb=$( units_as_gb "$sum_wgb_orig" "$sum_wgb_units" )

  # Flush(GB): cumulative 97.193, interval 1.247
  flush_wgb=$( grep "^Flush(GB)" "$test_out" | tail -1 | awk '{ print $3 }' | tr ',' ' ' | awk '{ print $1 }' )

  if [[ "$sum_wgb" == "NA" || \
        "$cum_writes_gb" == "NA" || \
        "$cum_writes_gb_orig" == "0.0" || \
        -z "$cum_writes_gb_orig" || \
        -z "$flush_wgb" ]]; then
    wamp="NA"
  else
    wamp=$( echo "( $sum_wgb + $flush_wgb ) / $cum_writes_gb" | bc -l | awk '{ printf "%.1f", $1 }' )
  fi
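
  # For example, with the sample lines above:
  # ( 1159.7 + 97.193 ) / 96.0 gives a write amplification of 13.1 (rounded).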

  c_wsecs=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $15 }' )
  c_csecs=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $16 }' )

  lsm_size=$( grep "^ Sum" "$test_out" | tail -1 | awk '{ printf "%.0f%s", $3, $4 }' )
  blob_size=$( grep "^Blob file count:" "$test_out" | tail -1 | awk '{ printf "%.0f%s", $7, $8 }' )
  # Remove the trailing comma from blob_size: 3.0GB, -> 3.0GB
  blob_size="${blob_size/,/}"

  b_rgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $21 }' )
  b_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $22 }' )

  p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
  p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' )
  p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' )
  p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' )
  pmax=$( grep "^Min: " $test_out | grep Median: | grep Max: | awk '{ printf "%.0f", $6 }' )

  # Use the last line because there might be extra lines when the db_bench process exits with an error
  time_out="$test_out".time
  u_cpu=$( tail -1 "$time_out" | awk '{ printf "%.1f", $2 / 1000.0 }' )
  s_cpu=$( tail -1 "$time_out" | awk '{ printf "%.1f", $3 / 1000.0 }' )

  rss="NA"
  if [ -f $test_out.stats.ps ]; then
    rss=$( awk '{ printf "%.1f\n", $6 / (1024 * 1024) }' "$test_out".stats.ps | sort -n | tail -1 )
  fi

  # If the report TSV (Tab-Separated Values) file does not yet exist, create it and write the header row to it
  if [ ! -f "$report" ]; then
    echo -e "# ops_sec - operations per second" >> "$report"
    echo -e "# mb_sec - ops_sec * size-of-operation-in-MB" >> "$report"
    echo -e "# lsm_sz - size of LSM tree" >> "$report"
    echo -e "# blob_sz - size of BlobDB logs" >> "$report"
    echo -e "# c_wgb - GB written by compaction" >> "$report"
    echo -e "# w_amp - Write-amplification as ((GB written by compaction + GB written by flush) / GB of user ingest)" >> "$report"
    echo -e "# c_mbps - Average write rate for compaction" >> "$report"
    echo -e "# c_wsecs - Wall clock seconds doing compaction" >> "$report"
    echo -e "# c_csecs - CPU seconds doing compaction" >> "$report"
    echo -e "# b_rgb - Blob compaction read GB" >> "$report"
    echo -e "# b_wgb - Blob compaction write GB" >> "$report"
    echo -e "# usec_op - Microseconds per operation" >> "$report"
    echo -e "# p50, p99, p99.9, p99.99 - 50th, 99th, 99.9th, 99.99th percentile response time in usecs" >> "$report"
    echo -e "# pmax - max response time in usecs" >> "$report"
    echo -e "# uptime - RocksDB uptime in seconds" >> "$report"
    echo -e "# stall% - Percentage of time writes are stalled" >> "$report"
    echo -e "# Nstall - Number of stalls" >> "$report"
    echo -e "# u_cpu - #seconds/1000 of user CPU" >> "$report"
    echo -e "# s_cpu - #seconds/1000 of system CPU" >> "$report"
    echo -e "# rss - max RSS in GB for db_bench process" >> "$report"
    echo -e "# test - Name of test" >> "$report"
    echo -e "# date - Date/time of test" >> "$report"
    echo -e "# version - RocksDB version" >> "$report"
    echo -e "# job_id - User-provided job ID" >> "$report"
    echo -e "# githash - git hash at which db_bench was compiled" >> "$report"
    echo -e $tsv_header >> "$report"
  fi

  echo -e "$ops_sec\t$mb_sec\t$lsm_size\t$blob_size\t$sum_wgb\t$wamp\t$cmb_ps\t$c_wsecs\t$c_csecs\t$b_rgb\t$b_wgb\t$usecs_op\t$p50\t$p99\t$p999\t$p9999\t$pmax\t$uptime\t$stall_pct\t$nstall\t$u_cpu\t$s_cpu\t$rss\t$test_name\t$my_date\t$version\t$job_id\t$git_hash" \
    >> "$report"
}

function run_bulkload {
  # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
  # client can discover where to restart a load after a crash. I think this is a good way to load.
  echo "Bulk loading $num_keys random keys"
  log_file_name=$output_dir/benchmark_bulkload_fillrandom.log
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=fillrandom,stats \
    --use_existing_db=0 \
    --disable_auto_compactions=1 \
    --sync=0 \
    $params_bulkload \
    --threads=1 \
    --memtablerep=vector \
    --allow_concurrent_memtable_write=false \
    --disable_wal=1 \
    --seed=$( date +%s ) \
    --report_file=${log_file_name}.r.csv \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  eval $cmd
  summarize_result $log_file_name bulkload fillrandom

  echo "Compacting..."
  log_file_name=$output_dir/benchmark_bulkload_compact.log
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=compact,stats \
    --use_existing_db=1 \
    --disable_auto_compactions=1 \
    --sync=0 \
    $params_w \
    --threads=1 \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  eval $cmd
}

#
# Parameter description:
#
# $1 - 1 if I/O statistics should be collected.
# $2 - compaction type to use (level=0, universal=1).
# $3 - number of subcompactions.
# $4 - number of maximum background compactions.
#
function run_manual_compaction_worker {
  # This runs with a vector memtable and the WAL disabled to load faster.
  # It is still crash safe and the client can discover where to restart a
  # load after a crash. I think this is a good way to load.
  echo "Bulk loading $num_keys random keys for manual compaction."

  log_file_name=$output_dir/benchmark_man_compact_fillrandom_$3.log

  if [ "$2" == "1" ]; then
    extra_params=$params_univ_compact
  else
    extra_params=$params_level_compact
  fi

  # Make sure that fillrandom uses the same compaction options as compact.
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=fillrandom,stats \
    --use_existing_db=0 \
    --disable_auto_compactions=0 \
    --sync=0 \
    $extra_params \
    --threads=$num_threads \
    --compaction_measure_io_stats=$1 \
    --compaction_style=$2 \
    --subcompactions=$3 \
    --memtablerep=vector \
    --allow_concurrent_memtable_write=false \
    --disable_wal=1 \
    --max_background_compactions=$4 \
    --seed=$( date +%s ) \
    2>&1 | tee -a $log_file_name"

  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  eval $cmd

  summarize_result $log_file_name man_compact_fillrandom_$3 fillrandom

  echo "Compacting with $3 subcompactions specified ..."

  log_file_name=$output_dir/benchmark_man_compact_$3.log

  # This is the part we're really interested in. Given that compact benchmark
  # doesn't output regular statistics then we'll just use the time command to
  # measure how long this step takes.
  cmd="{ \
    time ./db_bench --benchmarks=compact,stats \
      --use_existing_db=1 \
      --disable_auto_compactions=0 \
      --sync=0 \
      $extra_params \
      --threads=$num_threads \
      --compaction_measure_io_stats=$1 \
      --compaction_style=$2 \
      --subcompactions=$3 \
      --max_background_compactions=$4 \
    ;} 2>&1 | tee -a $log_file_name"

  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  eval $cmd

  # Can't use summarize_result here. One way to analyze the results is to run
  # "grep real" on the resulting log files.
}

function run_univ_compaction {
  # Always ask for I/O statistics to be measured.
  io_stats=1

  # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1.
  compaction_style=1

  # Define a set of benchmarks.
  subcompactions=(1 2 4 8 16)
  max_background_compactions=(16 16 8 4 2)

  i=0
  total=${#subcompactions[@]}

  # Execute a set of benchmarks to cover variety of scenarios.
  while [ "$i" -lt "$total" ]
  do
    run_manual_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} \
      ${max_background_compactions[$i]}
    ((i++))
  done
}

function run_fillseq {
  # This runs with a vector memtable. WAL can be either disabled or enabled
  # depending on the input parameter (1 for disabled, 0 for enabled). The main
  # benefit behind disabling WAL is to make loading faster. It is still crash
  # safe and the client can discover where to restart a load after a crash. I
  # think this is a good way to load.

  # Make sure that we'll have unique names for all the files so that data won't
  # be overwritten.
  if [ $1 == 1 ]; then
    log_file_name="${output_dir}/benchmark_fillseq.wal_disabled.v${value_size}.log"
    test_name=fillseq.wal_disabled.v${value_size}
  else
    log_file_name="${output_dir}/benchmark_fillseq.wal_enabled.v${value_size}.log"
    test_name=fillseq.wal_enabled.v${value_size}
  fi

  # For Leveled compaction hardwire this to 0 so that data that is trivial-moved
  # to larger levels (3, 4, etc) will be compressed.
  if [ $compaction_style == "leveled" ]; then
    comp_arg="--min_level_to_compress=0"
  elif [ $compaction_style == "universal" ]; then
    if [ ! -z $UNIVERSAL_ALLOW_TRIVIAL_MOVE ]; then
      # See GetCompressionFlush where compression_size_percent < 1 means use the default
      # compression which is needed because trivial moves are enabled
      comp_arg="--universal_compression_size_percent=-1"
    else
      # See GetCompressionFlush where compression_size_percent > 0 means no compression.
      # Don't set anything here because compression_size_percent is set in univ_const_params
      comp_arg=""
    fi
  else
    # compaction_style == "blob"
    comp_arg="--min_level_to_compress=0"
  fi

  echo "Loading $num_keys keys sequentially"
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=fillseq,stats \
    $params_fillseq \
    $comp_arg \
    --use_existing_db=0 \
    --sync=0 \
    --threads=1 \
    --memtablerep=vector \
    --allow_concurrent_memtable_write=false \
    --disable_wal=$1 \
    --seed=$( date +%s ) \
    --report_file=${log_file_name}.r.csv \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats

  # The constant "fillseq" which we pass to db_bench is the benchmark name.
  summarize_result $log_file_name $test_name fillseq
}

function run_lsm {
  # This flushes the memtable and L0 to get the LSM tree into a deterministic
  # state for read-only tests that will follow.
  echo "Flush memtable, wait, compact L0, wait"
  job=$1

  if [ $job = flush_mt_l0 ]; then
    benchmarks=levelstats,flush,waitforcompaction,compact0,waitforcompaction,memstats,levelstats
  elif [ $job = waitforcompaction ]; then
    benchmarks=levelstats,waitforcompaction,memstats,levelstats
  else
    echo Job unknown: $job
    exit $EXIT_UNKNOWN_JOB
  fi

  log_file_name=$output_dir/benchmark_${job}.log
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=$benchmarks,stats \
    --use_existing_db=1 \
    --sync=0 \
    $params_w \
    --threads=1 \
    --seed=$( date +%s ) \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  # waitforcompaction can hang with universal (compaction_style=1)
  # see bug https://github.com/facebook/rocksdb/issues/9275
  eval $cmd
  stop_stats $log_file_name.stats
  # Don't summarize, the log doesn't have the output needed for it
}

function run_change {
  output_name=$1
  grep_name=$2
  benchmarks=$3
  echo "Do $num_keys random $output_name"
  log_file_name="$output_dir/benchmark_${output_name}.t${num_threads}.s${syncval}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=$benchmarks,stats \
    --use_existing_db=1 \
    --sync=$syncval \
    $params_w \
    --threads=$num_threads \
    --merge_operator=\"put\" \
    --seed=$( date +%s ) \
    --report_file=${log_file_name}.r.csv \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name ${output_name}.t${num_threads}.s${syncval} $grep_name
}
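
# For example (illustrative): "run_change overwrite overwrite overwrite" runs
# the overwrite benchmark, greps its result line for "overwrite", and with the
# defaults names the result row overwrite.t64.s1 in report.tsv.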

function run_filluniquerandom {
  echo "Loading $num_keys unique keys randomly"
  log_file_name=$output_dir/benchmark_filluniquerandom.log
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=filluniquerandom,stats \
    --use_existing_db=0 \
    --sync=0 \
    $params_w \
    --threads=1 \
    --seed=$( date +%s ) \
    --report_file=${log_file_name}.r.csv \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name filluniquerandom filluniquerandom
}

function run_readrandom {
  echo "Reading $num_keys random keys"
  log_file_name="${output_dir}/benchmark_readrandom.t${num_threads}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=readrandom,stats \
    --use_existing_db=1 \
    $params_w \
    --threads=$num_threads \
    --seed=$( date +%s ) \
    --report_file=${log_file_name}.r.csv \
    2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name readrandom.t${num_threads} readrandom
}
function run_multireadrandom {
  echo "Multi-Reading $num_keys random keys"
  log_file_name="${output_dir}/benchmark_multireadrandom.t${num_threads}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=multireadrandom,stats \
      --use_existing_db=1 \
      --threads=$num_threads \
      --batch_size=10 \
      $params_w \
      --seed=$( date +%s ) \
      --report_file=${log_file_name}.r.csv \
      2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name multireadrandom.t${num_threads} multireadrandom
}
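
# run_readwhile: point lookups with one concurrent writer. $1 names the
# background operation (writing or merging) and selects the matching
# db_bench benchmark, readwhilewriting or readwhilemerging.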
function run_readwhile {
  operation=$1
  echo "Reading $num_keys random keys while $operation"
  log_file_name="${output_dir}/benchmark_readwhile${operation}.t${num_threads}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench --benchmarks=readwhile${operation},stats \
      --use_existing_db=1 \
      --sync=$syncval \
      $params_w \
      --threads=$num_threads \
      --merge_operator=\"put\" \
      --seed=$( date +%s ) \
      --report_file=${log_file_name}.r.csv \
      2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name readwhile${operation}.t${num_threads} readwhile${operation}
}
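
# run_rangewhile: range scans with one concurrent writer. $1 is the background
# operation (writing or merging), $2 names the log and result files, and $3 is
# passed to --reverse_iterator to choose forward or reverse scans.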
function run_rangewhile {
  operation=$1
  full_name=$2
  reverse_arg=$3
  log_file_name="${output_dir}/benchmark_${full_name}.t${num_threads}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}"
  cmd="$time_cmd ./db_bench --benchmarks=seekrandomwhile${operation},stats \
      --use_existing_db=1 \
      --sync=$syncval \
      $params_w \
      --threads=$num_threads \
      --merge_operator=\"put\" \
      --seek_nexts=$num_nexts_per_seek \
      --reverse_iterator=$reverse_arg \
      --seed=$( date +%s ) \
      --report_file=${log_file_name}.r.csv \
      2>&1 | tee -a $log_file_name"
  # Match the other benchmark runners: record the job ID at the top of the
  # log when one is set.
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name ${full_name}.t${num_threads} seekrandomwhile${operation}
}
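
# run_range: range scans on an otherwise idle database using the db_bench
# seekrandom benchmark; each seek advances the iterator --seek_nexts times,
# in reverse when $2 sets --reverse_iterator to true.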
function run_range {
  full_name=$1
  reverse_arg=$2
  log_file_name="${output_dir}/benchmark_${full_name}.t${num_threads}.log"
  time_cmd=$( get_cmd $log_file_name.time )
  echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}"
  cmd="$time_cmd ./db_bench --benchmarks=seekrandom,stats \
      --use_existing_db=1 \
      $params_w \
      --threads=$num_threads \
      --seek_nexts=$num_nexts_per_seek \
      --reverse_iterator=$reverse_arg \
      --seed=$( date +%s ) \
      --report_file=${log_file_name}.r.csv \
      2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
  summarize_result $log_file_name ${full_name}.t${num_threads} seekrandom
}
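
# run_randomtransaction: concurrent random transactions against a
# TransactionDB, fixed at 5 threads and 5 transaction sets.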
function run_randomtransaction {
  echo "Running $num_keys random transactions with 5 threads"
  log_file_name=$output_dir/benchmark_randomtransaction.log
  time_cmd=$( get_cmd $log_file_name.time )
  cmd="$time_cmd ./db_bench $params_w --benchmarks=randomtransaction,stats \
      --num=$num_keys \
      --transaction_db \
      --threads=5 \
      --transaction_sets=5 \
      --report_file=${log_file_name}.r.csv \
      2>&1 | tee -a $log_file_name"
  if [[ "$job_id" != "" ]]; then
    echo "Job ID: ${job_id}" > $log_file_name
    echo $cmd | tee -a $log_file_name
  else
    echo $cmd | tee $log_file_name
  fi
  start_stats $log_file_name.stats
  eval $cmd
  stop_stats $log_file_name.stats
}
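
# now: seconds since the epoch, used to time each job in the driver loop.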
function now() {
  date +%s
}
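
# Main driver: split the comma-separated job list in $bench_cmd and run each
# benchmark in turn. A typical invocation looks like the following (the paths
# and job list are only an example):
#   DB_BENCH_NO_SYNC=1 DB_DIR=/tmp WAL_DIR=/tmp OUTPUT_DIR=/tmp/b \
#     ./tools/benchmark.sh debug,bulkload,fillseq_disable_wal,readrandom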

echo "===== Benchmark ====="

# Run!!!
IFS=',' read -a jobs <<< $bench_cmd
# shellcheck disable=SC2068
for job in ${jobs[@]}; do
  if [ $job != debug ]; then
    echo "Starting $job (ID: $job_id) at `date`" | tee -a $schedule
  fi

  start=$(now)
  if [ $job = bulkload ]; then
    run_bulkload
  elif [ $job = flush_mt_l0 ]; then
    run_lsm flush_mt_l0
  elif [ $job = waitforcompaction ]; then
    run_lsm waitforcompaction
  elif [ $job = fillseq_disable_wal ]; then
    run_fillseq 1
  elif [ $job = fillseq_enable_wal ]; then
    run_fillseq 0
  elif [ $job = overwrite ]; then
    run_change overwrite overwrite overwrite
  elif [ $job = overwritesome ]; then
    # This uses a different name for overwrite results so it can be run twice in one benchmark run.
    run_change overwritesome overwrite overwrite
  elif [ $job = overwriteandwait ]; then
    run_change overwriteandwait overwrite overwrite,waitforcompaction
  elif [ $job = updaterandom ]; then
    run_change updaterandom updaterandom updaterandom
  elif [ $job = mergerandom ]; then
    run_change mergerandom mergerandom mergerandom
  elif [ $job = filluniquerandom ]; then
    run_filluniquerandom
  elif [ $job = readrandom ]; then
    run_readrandom
  elif [ $job = multireadrandom ]; then
    run_multireadrandom
  elif [ $job = fwdrange ]; then
    run_range $job false
  elif [ $job = revrange ]; then
    run_range $job true
  elif [ $job = readwhilewriting ]; then
    run_readwhile writing
  elif [ $job = readwhilemerging ]; then
    run_readwhile merging
  elif [ $job = fwdrangewhilewriting ]; then
    run_rangewhile writing $job false
  elif [ $job = revrangewhilewriting ]; then
    run_rangewhile writing $job true
  elif [ $job = fwdrangewhilemerging ]; then
    run_rangewhile merging $job false
  elif [ $job = revrangewhilemerging ]; then
    run_rangewhile merging $job true
  elif [ $job = randomtransaction ]; then
    run_randomtransaction
  elif [ $job = universal_compaction ]; then
    run_univ_compaction
  elif [ $job = debug ]; then
    num_keys=1000; # debug
    echo "Setting num_keys to $num_keys"
  else
    echo "unknown job $job"
    exit $EXIT_UNKNOWN_JOB
  fi
  end=$(now)

  if [ $job != debug ]; then
    echo "Completed $job (ID: $job_id) in $((end-start)) seconds" | tee -a $schedule
  fi
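
  # Print the column header and the most recent summary line from the report
  # file so per-job results are visible as the run progresses.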
  echo -e $tsv_header
  tail -1 $report
done