#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

set -e

NUM=10000000
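
# Most of the runs below write and/or read NUM (10 million) keys.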

if [ $# -eq 1 ];then
  DATA_DIR=$1
elif [ $# -eq 2 ];then
  DATA_DIR=$1
  STAT_FILE=$2
fi
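
# Usage: the first positional argument overrides DATA_DIR and the second
# overrides STAT_FILE; anything not supplied falls back to the mktemp
# defaults below.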

# On the production build servers, set data and stat
# files/directories not in /tmp or else the tempdir cleaning
# scripts will make you very unhappy.
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
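
# Note: "mktemp -u" only generates a unique name without creating the file;
# each benchmark below writes its report to ${STAT_FILE}.<benchmark_name>.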

function cleanup {
  rm -rf "$DATA_DIR"
  rm -f "$STAT_FILE".*
}

trap cleanup EXIT
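
# With "set -e" plus this EXIT trap, the data directory and stat files are
# cleaned up whether the benchmarks succeed or a step fails part-way through.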

make release
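
# "make release" is expected to leave the db_bench binary in the current
# directory; every run below invokes it as ./db_bench.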

# measure fillseq + fill up the DB for overwrite benchmark
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq
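
# Most of the disk-based runs below share the same cache setup:
# --cache_size=6442450944 is a 6 GB block cache, --cache_numshardbits=6
# splits it into 2^6 = 64 shards, and --table_cache_numshardbits=4 gives
# 2^4 = 16 table-cache shards. The WAL and sync are disabled so only engine
# throughput is measured.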

# measure overwrite performance
./db_bench \
    --benchmarks=overwrite \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite
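
# The overwrite run above reuses the database produced by fillseq
# (--use_existing_db=1) and issues NUM/10 random overwrites from 8 threads.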

# fill up the db for readrandom benchmark (1GB total size)
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# measure readrandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom

# measure readrandom with 6GB block cache and tailing iterator
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --use_tailing_iterator=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomtailing
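
# The next run shrinks the block cache from 6 GB to 100 MB (104857600 bytes)
# so far fewer data blocks stay cached in memory.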

# measure readrandom with 100MB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomsmallblockcache

# measure readrandom with 8k data in memtable
./db_bench \
    --benchmarks=overwrite,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --writes=512 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_mem_sst
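
# In the run above, --writes=512 combined with a 1 GB write buffer keeps the
# freshly overwritten keys in the memtable, so part of the readrandom load is
# served from memory rather than from SST files.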

# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
./db_bench \
    --benchmarks=filluniquerandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# dummy test just to compact the data
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > /dev/null

# measure readrandom after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --disable_auto_compactions=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom
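
# --disable_auto_compactions=1 keeps the LSM shape produced by the
# filluniquerandom load intact while readrandom runs.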

# measure readwhilewriting after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readwhilewriting \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readwhilewriting
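
# --benchmark_write_rate_limit=$((110 * 1024)) caps the background writer at
# about 110 KiB/s during the readwhilewriting run above.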

# measure memtable performance -- none of the data gets flushed to disk
./db_bench \
    --benchmarks=fillrandom,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --reads=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --value_size=10 \
    --threads=16 > ${STAT_FILE}.memtablefillreadrandom
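
# With NUM/10 keys, 10-byte values and a 1 GB write buffer, the data set
# above stays in the memtable, which is what the "none of the data gets
# flushed to disk" comment relies on.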

common_in_mem_args="--db=/dev/shm/rocksdb \
    --num_levels=6 \
    --key_size=20 \
    --prefix_size=12 \
    --keys_per_prefix=10 \
    --value_size=100 \
    --compression_type=none \
    --compression_ratio=1 \
    --write_buffer_size=134217728 \
    --max_write_buffer_number=4 \
    --level0_file_num_compaction_trigger=8 \
    --level0_slowdown_writes_trigger=16 \
    --level0_stop_writes_trigger=24 \
    --target_file_size_base=134217728 \
    --max_bytes_for_level_base=1073741824 \
    --disable_wal=0 \
    --wal_dir=/dev/shm/rocksdb \
    --sync=0 \
    --verify_checksum=1 \
    --delete_obsolete_files_period_micros=314572800 \
    --use_plain_table=1 \
    --open_files=-1 \
    --mmap_read=1 \
    --mmap_write=0 \
    --bloom_bits=10 \
    --bloom_locality=1 \
    --perf_level=0"
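
# common_in_mem_args puts both the database and its WAL on /dev/shm and uses
# the plain-table format with mmap reads and a 12-byte prefix, i.e. an
# in-memory, prefix-bloom configuration. write_buffer_size and
# target_file_size_base are 128 MB (134217728) and max_bytes_for_level_base
# is 1 GB (1073741824).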

# prepare an in-memory DB with 50M keys, total DB size is ~6G
./db_bench \
    $common_in_mem_args \
    --statistics=0 \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=52428800 \
    --threads=1 > /dev/null
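
# 52428800 keys with 20-byte keys and 100-byte values is roughly 6 GB of raw
# data, consistent with the ~6G estimate above.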

# Readwhilewriting
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram
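
# --benchmark_write_rate_limit=9502720 limits the writers to about 9 MB/s in
# the two in-memory readwhilewriting/seekrandomwhilewriting runs.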

# Seekrandomwhilewriting
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram

# measure fillseq with a bunch of column families
./db_bench \
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$NUM \
    --writes=$NUM \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq_lots_column_families
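
# The two column-family runs spread the load over 500 column families, each
# with a small 1 MB (1048576-byte) write buffer.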

# measure overwrite performance with a bunch of column families
./db_bench \
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite_lots_column_families

# send data to ODS
function send_to_ods {
  key="$1"
  value="$2"

  if [ -z "$JENKINS_HOME" ]; then
    # running on a devbox, just print out the values
    echo "$key" "$value"
    return
  fi

  if [ -z "$value" ];then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  curl --silent "https://www.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=$key&value=$value" \
    --connect-timeout 60
}

function send_benchmark_to_ods {
  bench="$1"
  bench_key="$2"
  file="$3"

  QPS=$(grep "$bench" "$file" | awk '{print $5}')
  P50_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $3}')
  P75_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $5}')
  P99_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $7}')

  send_to_ods rocksdb.build.$bench_key.qps $QPS
  send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS
  send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS
  send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS
}
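
# Note: the extraction above assumes db_bench's standard report format: field
# 5 of the benchmark summary line is ops/sec, and the "Percentiles" line
# printed with --histogram carries P50/P75/P99 in fields 3, 5 and 7.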

send_benchmark_to_ods overwrite overwrite $STAT_FILE.overwrite
send_benchmark_to_ods fillseq fillseq $STAT_FILE.fillseq
send_benchmark_to_ods readrandom readrandom $STAT_FILE.readrandom
send_benchmark_to_ods readrandom readrandom_tailing $STAT_FILE.readrandomtailing
send_benchmark_to_ods readrandom readrandom_smallblockcache $STAT_FILE.readrandomsmallblockcache
send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_mem_sst
send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom
send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom
send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom
send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram
send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families
send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families