#!/usr/bin/env bash
#
# Detects OS we're compiling on and outputs a file specified by the first
# argument, which in turn gets read while processing Makefile.
#
# The output will set the following variables:
#   CC                          C Compiler path
#   CXX                         C++ Compiler path
#   PLATFORM_LDFLAGS            Linker flags
#   JAVA_LDFLAGS                Linker flags for RocksDBJava
#   JAVA_STATIC_LDFLAGS         Linker flags for RocksDBJava static build
#   PLATFORM_SHARED_EXT         Extension for shared libraries
#   PLATFORM_SHARED_LDFLAGS     Flags for building shared library
#   PLATFORM_SHARED_CFLAGS      Flags for compiling objects for shared library
#   PLATFORM_CCFLAGS            C compiler flags
#   PLATFORM_CXXFLAGS           C++ compiler flags. Will contain:
#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
#                               shared libraries, empty otherwise.
#   FIND                        Command for the find utility
#   WATCH                       Command for the watch utility
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
#   -DROCKSDB_PLATFORM_POSIX    if posix-platform based
#   -DSNAPPY                    if the Snappy library is present
#   -DLZ4                       if the LZ4 library is present
#   -DZSTD                      if the ZSTD library is present
#   -DNUMA                      if the NUMA library is present
#   -DTBB                       if the TBB library is present
#
# Using gflags in rocksdb:
# Our project depends on gflags, which requires users to take some extra steps
# before they can compile the whole repository:
#   1. Install gflags. You may download it from here:
#      https://gflags.github.io/gflags/ (Mac users can `brew install gflags`)
#   2. Once installed, add the include path for gflags to your CPATH env var and
#      the lib path to LIBRARY_PATH. If installed with default settings, the lib
#      will be /usr/local/lib and the include path will be /usr/local/include
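#      For example (illustrative, assuming the default install prefix):
#        export CPATH=/usr/local/include:$CPATH
#        export LIBRARY_PATH=/usr/local/lib:$LIBRARY_PATH
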
OUTPUT=$1
if test -z "$OUTPUT"; then
  echo "usage: $0 <output-filename>" >&2
  exit 1
fi
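
# Typical invocation (illustrative): the Makefile runs something like
#   build_tools/build_detect_platform make_config.mk
# and then includes the generated file.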

# we depend on C++11
PLATFORM_CXXFLAGS="-std=c++11"
# we currently depend on POSIX platform
COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"

# Default to fbcode gcc on internal fb machines
if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
  FBCODE_BUILD="true"
  # If we're compiling with TSAN we need a PIC build
  PIC_BUILD=$COMPILE_WITH_TSAN
  if [ -n "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
    # we need this to build with MySQL. Don't use for other purposes.
    source "$PWD/build_tools/fbcode_config4.8.1.sh"
  elif [ -n "$ROCKSDB_FBCODE_BUILD_WITH_5xx" ]; then
    source "$PWD/build_tools/fbcode_config.sh"
  else
    source "$PWD/build_tools/fbcode_config_platform007.sh"
  fi
fi
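# Note: setting ROCKSDB_NO_FBCODE skips the fbcode toolchain even on machines
# where /mnt/gvfs/third-party exists.
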
# Delete existing output, if it exists
rm -f "$OUTPUT"
touch "$OUTPUT"

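# CC and CXX may already be provided by the environment (user or Makefile);
# the defaults below are only chosen when they are empty.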
if test -z "$CC"; then
  if [ -x "$(command -v cc)" ]; then
    CC=cc
  elif [ -x "$(command -v clang)" ]; then
    CC=clang
  else
    CC=cc
  fi
fi

if test -z "$CXX"; then
  if [ -x "$(command -v g++)" ]; then
    CXX=g++
  elif [ -x "$(command -v clang++)" ]; then
    CXX=clang++
  else
    CXX=g++
  fi
fi

# Detect OS
if test -z "$TARGET_OS"; then
  TARGET_OS=`uname -s`
fi

if test -z "$TARGET_ARCHITECTURE"; then
  TARGET_ARCHITECTURE=`uname -m`
fi

if test -z "$CLANG_SCAN_BUILD"; then
  CLANG_SCAN_BUILD=scan-build
fi

if test -z "$CLANG_ANALYZER"; then
  CLANG_ANALYZER=$(command -v clang++ 2> /dev/null)
fi

if test -z "$FIND"; then
  FIND=find
fi

if test -z "$WATCH"; then
  WATCH=watch
fi

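# Shared/static library defaults; the per-OS case statement below may override
# several of these (e.g. PLATFORM_SHARED_EXT becomes "dylib" on Darwin).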
COMMON_FLAGS="$COMMON_FLAGS ${CFLAGS}"
CROSS_COMPILE=
PLATFORM_CCFLAGS=
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS"
PLATFORM_SHARED_EXT="so"
PLATFORM_SHARED_LDFLAGS="-Wl,--no-as-needed -shared -Wl,-soname -Wl,"
PLATFORM_SHARED_CFLAGS="-fPIC"
PLATFORM_SHARED_VERSIONED=true

# generic port files (working on all platforms via #ifdef) go directly in /port
GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`

# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp
case "$TARGET_OS" in
  Darwin)
    PLATFORM=OS_MACOSX
    COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
    PLATFORM_SHARED_EXT=dylib
    PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
    # PORT_FILES=port/darwin/darwin_specific.cc
    ;;
  IOS)
    PLATFORM=IOS
    COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE"
    PLATFORM_SHARED_EXT=dylib
    PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
    CROSS_COMPILE=true
    PLATFORM_SHARED_VERSIONED=
    ;;
  Linux)
    PLATFORM=OS_LINUX
    COMMON_FLAGS="$COMMON_FLAGS -DOS_LINUX"
    if [ -z "$USE_CLANG" ]; then
      COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
    else
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -latomic"
    fi
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
    if test $ROCKSDB_USE_IO_URING; then
      # check for liburing
      $CXX $CFLAGS -x c++ - -luring -o /dev/null 2>/dev/null <<EOF
#include <liburing.h>
int main() {
  struct io_uring ring;
  io_uring_queue_init(1, &ring, 0);
  return 0;
}
EOF
      if [ "$?" = 0 ]; then
        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -luring"
        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_IOURING_PRESENT"
      fi
    fi
    if test -z "$USE_FOLLY_DISTRIBUTED_MUTEX"; then
      USE_FOLLY_DISTRIBUTED_MUTEX=1
    fi
    # PORT_FILES=port/linux/linux_specific.cc
    ;;
  SunOS)
    PLATFORM=OS_SOLARIS
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_SOLARIS -m64"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt -static-libstdc++ -static-libgcc -m64"
    # PORT_FILES=port/sunos/sunos_specific.cc
    ;;
  AIX)
    PLATFORM=OS_AIX
    CC=gcc
    COMMON_FLAGS="$COMMON_FLAGS -maix64 -pthread -fno-builtin-memcmp -D_REENTRANT -DOS_AIX -D__STDC_FORMAT_MACROS"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread -lpthread -lrt -maix64 -static-libstdc++ -static-libgcc"
    # PORT_FILES=port/aix/aix_specific.cc
    ;;
  FreeBSD)
    PLATFORM=OS_FREEBSD
    CXX=clang++
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_FREEBSD"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
    # PORT_FILES=port/freebsd/freebsd_specific.cc
    ;;
  NetBSD)
    PLATFORM=OS_NETBSD
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_NETBSD"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lgcc_s"
    # PORT_FILES=port/netbsd/netbsd_specific.cc
    ;;
  OpenBSD)
    PLATFORM=OS_OPENBSD
    CXX=clang++
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_OPENBSD"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread"
    # PORT_FILES=port/openbsd/openbsd_specific.cc
    FIND=gfind
    WATCH=gnuwatch
    ;;
  DragonFly)
    PLATFORM=OS_DRAGONFLYBSD
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_DRAGONFLYBSD"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
    # PORT_FILES=port/dragonfly/dragonfly_specific.cc
    ;;
  Cygwin)
    PLATFORM=CYGWIN
    PLATFORM_SHARED_CFLAGS=""
    PLATFORM_CXXFLAGS="-std=gnu++11"
    COMMON_FLAGS="$COMMON_FLAGS -DCYGWIN"
    if [ -z "$USE_CLANG" ]; then
      COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
    else
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -latomic"
    fi
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
    # PORT_FILES=port/linux/linux_specific.cc
    ;;
  OS_ANDROID_CROSSCOMPILE)
    PLATFORM=OS_ANDROID
    COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_ANDROID -DROCKSDB_PLATFORM_POSIX"
    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS " # All pthread features are in the Android C library
    # PORT_FILES=port/android/android.cc
    CROSS_COMPILE=true
    ;;
  *)
    echo "Unknown platform!" >&2
    exit 1
esac

PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS ${CXXFLAGS}"
JAVA_LDFLAGS="$PLATFORM_LDFLAGS"
JAVA_STATIC_LDFLAGS="$PLATFORM_LDFLAGS"

if [ "$CROSS_COMPILE" = "true" -o "$FBCODE_BUILD" = "true" ]; then
  # Cross-compiling; do not try any compilation tests.
  # Compilation tests are also unnecessary when building with fbcode.
  true
else
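  # Each probe below compiles a small test program from stdin; if the
  # compile/link succeeds, the corresponding feature flags are appended
  # (typically a -D define in COMMON_FLAGS and a -l library in PLATFORM_LDFLAGS).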
  if ! test $ROCKSDB_DISABLE_FALLOCATE; then
    # Test whether fallocate is available
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <fcntl.h>
#include <linux/falloc.h>
int main() {
  int fd = open("/dev/null", 0);
  fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024);
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_FALLOCATE_PRESENT"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_SNAPPY; then
    # Test whether Snappy library is installed
    # http://code.google.com/p/snappy/
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <snappy.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lsnappy"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -lsnappy"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_GFLAGS; then
    # Test whether gflags library is installed
    # http://gflags.github.io/gflags/
    # check if the namespace is gflags
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
#include <gflags/gflags.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=1"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
    else
      # check if namespace is google
      $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
#include <gflags/gflags.h>
using namespace google;
int main() {}
EOF
      if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=google"
        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
      fi
    fi
  fi

  if ! test $ROCKSDB_DISABLE_ZLIB; then
    # Test whether zlib library is installed
    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <zlib.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DZLIB"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lz"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -lz"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_BZIP; then
    # Test whether bzip library is installed
    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <bzlib.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DBZIP2"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lbz2"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -lbz2"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_LZ4; then
    # Test whether lz4 library is installed
    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <lz4.h>
#include <lz4hc.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DLZ4"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -llz4"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -llz4"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_ZSTD; then
    # Test whether zstd library is installed
    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <zstd.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DZSTD"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lzstd"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -lzstd"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_NUMA; then
    # Test whether numa is available
    $CXX $CFLAGS -x c++ - -o /dev/null -lnuma 2>/dev/null <<EOF
#include <numa.h>
#include <numaif.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DNUMA"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lnuma"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -lnuma"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_TBB; then
    # Test whether tbb is available
    $CXX $CFLAGS $LDFLAGS -x c++ - -o /dev/null -ltbb 2>/dev/null <<EOF
#include <tbb/tbb.h>
int main() {}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DTBB"
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltbb"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -ltbb"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_JEMALLOC; then
    # Test whether jemalloc is available
    if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \
      2>/dev/null; then
      # This will enable some preprocessor identifiers in the Makefile
      JEMALLOC=1
      # JEMALLOC can be enabled either using the flag (like here) or by
      # providing a direct link to the jemalloc library
      WITH_JEMALLOC_FLAG=1
      # check for jemalloc installed with Homebrew
      if [ "$PLATFORM" == "OS_MACOSX" ]; then
        if hash brew 2>/dev/null && brew ls --versions jemalloc > /dev/null; then
          JEMALLOC_VER=$(brew ls --versions jemalloc | tail -n 1 | cut -f 2 -d ' ')
          JEMALLOC_INCLUDE="-I/usr/local/Cellar/jemalloc/${JEMALLOC_VER}/include"
          JEMALLOC_LIB="/usr/local/Cellar/jemalloc/${JEMALLOC_VER}/lib/libjemalloc_pic.a"
          PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS $JEMALLOC_LIB"
          JAVA_STATIC_LDFLAGS="$JAVA_STATIC_LDFLAGS $JEMALLOC_LIB"
        fi
      fi
    fi
  fi
  if ! test $JEMALLOC && ! test $ROCKSDB_DISABLE_TCMALLOC; then
    # jemalloc is not available. Let's try tcmalloc
    if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
      -ltcmalloc 2>/dev/null; then
      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltcmalloc"
      JAVA_LDFLAGS="$JAVA_LDFLAGS -ltcmalloc"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_MALLOC_USABLE_SIZE; then
    # Test whether malloc_usable_size is available
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <malloc.h>
int main() {
  size_t res = malloc_usable_size(0);
  (void)res;
  return 0;
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_MALLOC_USABLE_SIZE"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_PTHREAD_MUTEX_ADAPTIVE_NP; then
    # Test whether PTHREAD_MUTEX_ADAPTIVE_NP mutex type is available
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <pthread.h>
int main() {
  int x = PTHREAD_MUTEX_ADAPTIVE_NP;
  (void)x;
  return 0;
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_PTHREAD_ADAPTIVE_MUTEX"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_BACKTRACE; then
    # Test whether backtrace is available
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <execinfo.h>
int main() {
  void* frames[1];
  backtrace_symbols(frames, backtrace(frames, 1));
  return 0;
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
    else
      # Test whether execinfo library is installed
      $CXX $CFLAGS -lexecinfo -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <execinfo.h>
int main() {
  void* frames[1];
  backtrace_symbols(frames, backtrace(frames, 1));
}
EOF
      if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lexecinfo"
        JAVA_LDFLAGS="$JAVA_LDFLAGS -lexecinfo"
      fi
    fi
  fi

  if ! test $ROCKSDB_DISABLE_PG; then
    # Test if -pg is supported
    $CXX $CFLAGS -pg -x c++ - -o /dev/null 2>/dev/null <<EOF
int main() {
  return 0;
}
EOF
    if [ "$?" = 0 ]; then
      PROFILING_FLAGS=-pg
    fi
  fi

  if ! test $ROCKSDB_DISABLE_SYNC_FILE_RANGE; then
    # Test whether sync_file_range is supported for compatibility with an old glibc
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <fcntl.h>
int main() {
  int fd = open("/dev/null", 0);
  sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE);
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_RANGESYNC_PRESENT"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_SCHED_GETCPU; then
    # Test whether sched_getcpu is supported
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <sched.h>
int main() {
  int cpuid = sched_getcpu();
  (void)cpuid;
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SCHED_GETCPU_PRESENT"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_AUXV_GETAUXVAL; then
    # Test whether getauxval is supported
    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <sys/auxv.h>
int main() {
  uint64_t auxv = getauxval(AT_HWCAP);
  (void)auxv;
}
EOF
    if [ "$?" = 0 ]; then
      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_AUXV_GETAUXVAL_PRESENT"
    fi
  fi

  if ! test $ROCKSDB_DISABLE_ALIGNED_NEW; then
    # Test whether C++17 aligned-new is supported
    $CXX $PLATFORM_CXXFLAGS -faligned-new -x c++ - -o /dev/null 2>/dev/null <<EOF
struct alignas(1024) t {int a;};
int main() {}
EOF
    if [ "$?" = 0 ]; then
      PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS -faligned-new -DHAVE_ALIGNED_NEW"
    fi
  fi
fi

# TODO(tec): Fix -Wshorten-64-to-32 errors on FreeBSD and enable the warning.
# -Wshorten-64-to-32 breaks compilation on FreeBSD i386
if ! [ "$TARGET_OS" = FreeBSD -a "$TARGET_ARCHITECTURE" = i386 ]; then
  # Test whether -Wshorten-64-to-32 is available
  $CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
int main() {}
EOF
  if [ "$?" = 0 ]; then
    COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
  fi
fi

# shall we use HDFS?
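# HDFS support is opt-in; an illustrative invocation (paths are examples only):
#   USE_HDFS=1 JAVA_HOME=/path/to/jdk HADOOP_HOME=/path/to/hadoop make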
if test "$USE_HDFS"; then
  if test -z "$JAVA_HOME"; then
    echo "JAVA_HOME has to be set for HDFS usage." >&2
    exit 1
  fi
  HDFS_CCFLAGS="$HDFS_CCFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/linux -DUSE_HDFS -I$HADOOP_HOME/include"
  HDFS_LDFLAGS="$HDFS_LDFLAGS -lhdfs -L$JAVA_HOME/jre/lib/amd64 -L$HADOOP_HOME/lib/native"
  HDFS_LDFLAGS="$HDFS_LDFLAGS -L$JAVA_HOME/jre/lib/amd64/server -L$GLIBC_RUNTIME_PATH/lib"
  HDFS_LDFLAGS="$HDFS_LDFLAGS -ldl -lverify -ljava -ljvm"
  COMMON_FLAGS="$COMMON_FLAGS $HDFS_CCFLAGS"
  PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS $HDFS_LDFLAGS"
  JAVA_LDFLAGS="$JAVA_LDFLAGS $HDFS_LDFLAGS"
fi

if test "0$PORTABLE" -eq 0; then
  if test -n "`echo $TARGET_ARCHITECTURE | grep ^ppc64`"; then
    # Tune for this POWER processor, treating '+' models as base models
    POWER=`LD_SHOW_AUXV=1 /bin/true | grep AT_PLATFORM | grep -E -o power[0-9]+`
    COMMON_FLAGS="$COMMON_FLAGS -mcpu=$POWER -mtune=$POWER "
  elif test -n "`echo $TARGET_ARCHITECTURE | grep ^s390x`"; then
    COMMON_FLAGS="$COMMON_FLAGS -march=z10 "
  elif test -n "`echo $TARGET_ARCHITECTURE | grep -e^arm -e^aarch64`"; then
    # TODO: Handle this with appropriate options.
    COMMON_FLAGS="$COMMON_FLAGS"
  elif test -n "`echo $TARGET_ARCHITECTURE | grep ^aarch64`"; then
    COMMON_FLAGS="$COMMON_FLAGS"
  elif [ "$TARGET_OS" == "IOS" ]; then
    COMMON_FLAGS="$COMMON_FLAGS"
  elif [ "$TARGET_OS" == "AIX" ] || [ "$TARGET_OS" == "SunOS" ]; then
    # TODO: Not sure why we don't use -march=native on these OSes
    if test "$USE_SSE"; then
      TRY_SSE_ETC="1"
    fi
  else
    COMMON_FLAGS="$COMMON_FLAGS -march=native "
  fi
else
  # PORTABLE=1
  if test "$USE_SSE"; then
    TRY_SSE_ETC="1"
  fi
fi

if test "$TRY_SSE_ETC"; then
  # The USE_SSE flag now means "attempt to compile with widely-available
  # Intel architecture extensions utilized by specific optimizations in the
  # source code." It's a qualifier on PORTABLE=1 that means "mostly portable."
  # It doesn't even really check that your current CPU is compatible.
  #
  # SSE4.2 available since Nehalem, ca. 2008-2010
  TRY_SSE42="-msse4.2"
  # PCLMUL available since Westmere, ca. 2010-2011
  TRY_PCLMUL="-mpclmul"
  # AVX2 available since Haswell, ca. 2013-2015
  TRY_AVX2="-mavx2"
fi

$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_SSE42 -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <cstdint>
#include <nmmintrin.h>
int main() {
  volatile uint32_t x = _mm_crc32_u32(0, 0);
  (void)x;
}
EOF
if [ "$?" = 0 ]; then
  COMMON_FLAGS="$COMMON_FLAGS $TRY_SSE42 -DHAVE_SSE42"
elif test "$USE_SSE"; then
  echo "warning: USE_SSE specified but compiler could not use SSE intrinsics, disabling" >&2
fi

$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_PCLMUL -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <cstdint>
#include <wmmintrin.h>
int main() {
  const auto a = _mm_set_epi64x(0, 0);
  const auto b = _mm_set_epi64x(0, 0);
  const auto c = _mm_clmulepi64_si128(a, b, 0x00);
  auto d = _mm_cvtsi128_si64(c);
  (void)d;
}
EOF
if [ "$?" = 0 ]; then
  COMMON_FLAGS="$COMMON_FLAGS $TRY_PCLMUL -DHAVE_PCLMUL"
elif test "$USE_SSE"; then
  echo "warning: USE_SSE specified but compiler could not use PCLMUL intrinsics, disabling" >&2
cross-platform compatibility improvements
Summary:
We've had a couple of CockroachDB users fail to build RocksDB on exotic platforms, so I figured I'd try my hand at solving these issues upstream. The problems stem from a) `USE_SSE=1` being too aggressive about turning on SSE4.2, even on toolchains that don't support SSE4.2, and b) RocksDB attempting to detect support for thread-local storage based on the OS, even though it can vary by compiler on the same OS.
See the individual commit messages for details. Regarding SSE support, this PR should change virtually nothing for non-CMake based builds. `make`, `PORTABLE=1 make`, `USE_SSE=1 make`, and `PORTABLE=1 USE_SSE=1 make` function exactly as before, except that SSE support will be automatically disabled when a simple SSE4.2-using test program fails to compile, as it does on OpenBSD. (OpenBSD's ports GCC supports SSE4.2, but its binutils do not, so `__SSE4_2__` is defined but an SSE4.2-using program will fail to assemble.) A warning is emitted in this case. The CMake build is modified to support the same set of options, except that `USE_SSE` is spelled `FORCE_SSE42` because `USE_SSE` is rather useless now that we can automatically detect SSE support, and I figure changing options in the CMake build is less disruptive than changing the non-CMake build.
I've tested these changes on all the platforms I can get my hands on (macOS, Windows MSVC, Windows MinGW, and OpenBSD) and it all works splendidly. Let me know if there's anything you object to—I obviously don't mean to break any of your build pipelines in the process of fixing ours downstream.
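The kind of minimal SSE4.2 probe this refers to looks roughly like the sketch below (illustrative, not necessarily the exact program the script compiles); if it fails to compile or assemble, SSE support is disabled and the warning is printed:
```cpp
// A minimal SSE4.2 "does this toolchain actually work" probe (sketch).
// Compiled with something like: $CXX -msse4.2 -x c++ thisfile.cc -o /dev/null
#include <cstdint>
#include <nmmintrin.h>  // SSE4.2 intrinsics

int main() {
  volatile uint32_t x = _mm_crc32_u32(0, 0);  // forces a real SSE4.2 instruction
  (void)x;
  return 0;
}
```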
Closes https://github.com/facebook/rocksdb/pull/2199
Differential Revision: D5054042
Pulled By: yiwu-arbug
fbshipit-source-id: 938e1fc665c049c02ae15698e1409155b8e72171
2017-05-15 21:42:32 +00:00
|
|
|
fi
|
|
|
|
|
New Bloom filter implementation for full and partitioned filters (#6007)
Summary:
Adds an improved, replacement Bloom filter implementation (FastLocalBloom) for full and partitioned filters in the block-based table. This replacement is faster and more accurate, especially for high bits per key or millions of keys in a single filter.
Speed
The improved speed, at least on recent x86_64, comes from
* Using fastrange instead of modulo (%); see the sketch after this list
* Using our new hash function (XXH3 preview, added in a previous commit), which is much faster for large keys and only *slightly* slower on keys around 12 bytes if hashing the same size many thousands of times in a row.
* Optimizing the Bloom filter queries with AVX2 SIMD operations. (Added AVX2 to the USE_SSE=1 build.) Careful design was required to support (a) SIMD-optimized queries, (b) compatible non-SIMD code that's simple and efficient, (c) flexible choice of number of probes, and (d) essentially maximized accuracy for a cache-local Bloom filter. Probes are made eight at a time, so any number of probes up to 8 is the same speed, then up to 16, etc.
* Prefetching cache lines when building the filter. Although this optimization could be applied to the old structure as well, it seems to balance out the small added cost of accumulating 64 bit hashes for adding to the filter rather than 32 bit hashes.
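As a concrete illustration of the fastrange point above (a sketch with illustrative names, not RocksDB's actual code), a multiply-and-shift maps a 32-bit hash onto [0, num_cache_lines) without a division:
```cpp
// Illustrative fastrange mapping: (hash * range) >> 32 lands uniformly in
// [0, range) for a uniform 32-bit hash, avoiding the slower % operator.
#include <cstdint>
#include <cstdio>

inline uint32_t FastRange32(uint32_t hash, uint32_t range) {
  return static_cast<uint32_t>((uint64_t{hash} * uint64_t{range}) >> 32);
}

int main() {
  const uint32_t num_cache_lines = 1000;  // e.g. filter bytes / 64
  printf("%u\n", FastRange32(0x9e3779b9u, num_cache_lines));
  printf("%u\n", FastRange32(0xdeadbeefu, num_cache_lines));
  return 0;
}
```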
Here's nominal speed data from filter_bench (200MB in filters, about 10k keys each, 10 bits filter data / key, 6 probes, avg key size 24 bytes, includes hashing time) on Skylake DE (relatively low clock speed):
$ ./filter_bench -quick -impl=2 -net_includes_hashing # New Bloom filter
Build avg ns/key: 47.7135
Mixed inside/outside queries...
Single filter net ns/op: 26.2825
Random filter net ns/op: 150.459
Average FP rate %: 0.954651
$ ./filter_bench -quick -impl=0 -net_includes_hashing # Old Bloom filter
Build avg ns/key: 47.2245
Mixed inside/outside queries...
Single filter net ns/op: 63.2978
Random filter net ns/op: 188.038
Average FP rate %: 1.13823
Similar build time but dramatically faster query times on hot data (63 ns to 26 ns), and somewhat faster on stale data (188 ns to 150 ns). Performance differences on batched and skewed query loads are between these extremes as expected.
The only other interesting thing about speed is "inside" (query key was added to filter) vs. "outside" (query key was not added to filter) query times. The non-SIMD implementations are substantially slower when most queries are "outside" vs. "inside". This goes against what one might expect or would have observed years ago, as "outside" queries only need about two probes on average, due to short-circuiting, while "inside" always have num_probes (say 6). The problem is probably the nastily unpredictable branch. The SIMD implementation has few branches (very predictable) and has pretty consistent running time regardless of query outcome.
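To make that branch-prediction point concrete, here is an illustrative (non-RocksDB) comparison of an early-exit scalar probe loop with a branch-free variant that always evaluates every probe, closer in spirit to the SIMD path:
```cpp
// Illustrative only: the early-exit loop takes a data-dependent branch per
// probe (cheap for "outside" keys on paper, but hard to predict), while the
// branch-free loop does constant work per query.
#include <cstdint>

// Early exit: returns false at the first probe bit that is 0.
bool MayContainShortCircuit(const uint64_t* bits, const uint32_t* probes,
                            int num_probes) {
  for (int i = 0; i < num_probes; ++i) {
    uint32_t b = probes[i];
    if (((bits[b >> 6] >> (b & 63)) & 1) == 0) return false;
  }
  return true;
}

// Branch-free: ANDs all probe bits together, no early exit.
bool MayContainBranchFree(const uint64_t* bits, const uint32_t* probes,
                          int num_probes) {
  uint64_t all_set = 1;
  for (int i = 0; i < num_probes; ++i) {
    uint32_t b = probes[i];
    all_set &= (bits[b >> 6] >> (b & 63)) & 1;
  }
  return all_set != 0;
}

int main() {
  uint64_t bits[8] = {};  // one 512-bit cache line worth of filter data
  const uint32_t probes[6] = {3, 77, 130, 200, 301, 450};
  for (uint32_t p : probes) bits[p >> 6] |= uint64_t{1} << (p & 63);  // "add" a key
  bool ok = MayContainShortCircuit(bits, probes, 6) &&
            MayContainBranchFree(bits, probes, 6);
  return ok ? 0 : 1;
}
```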
Accuracy
The generally improved accuracy (re: Issue https://github.com/facebook/rocksdb/issues/5857) comes from a better design for probing indices
within a cache line (re: Issue https://github.com/facebook/rocksdb/issues/4120) and improved accuracy for millions of keys in a single filter from using a 64-bit hash function (XXH3p). Design details in code comments.
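The general shape of cache-line-local probing can be sketched generically (this is an assumed double-hashing illustration, not FastLocalBloom's actual index sequence, which is documented in the code comments):
```cpp
// Generic illustration (not the actual FastLocalBloom scheme): part of a
// 64-bit hash chooses a 64-byte cache line via a fastrange-style multiply,
// and the remaining bits drive a double-hashing walk over the 512 bit
// positions inside that line, so every probe touches the same cache line.
#include <cstdint>

struct Probes {
  uint32_t line;    // which 64-byte cache line of the filter
  uint32_t bit[6];  // bit offsets within that 512-bit line
};

inline Probes MakeProbes(uint64_t h, uint32_t num_lines) {
  Probes p;
  p.line = static_cast<uint32_t>(((h >> 32) * uint64_t{num_lines}) >> 32);
  uint32_t a = static_cast<uint32_t>(h) % 512;                 // starting bit
  uint32_t step = (static_cast<uint32_t>(h >> 9) % 511) + 1;   // non-zero stride
  for (int i = 0; i < 6; ++i) {
    p.bit[i] = a;
    a = (a + step) % 512;
  }
  return p;
}

int main() {
  Probes p = MakeProbes(0x123456789abcdef0ULL, 1000);
  return p.line < 1000 ? 0 : 1;
}
```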
Accuracy data (generalizes, except old impl gets worse with millions of keys):
Memory bits per key: FP rate percent old impl -> FP rate percent new impl
6: 5.70953 -> 5.69888
8: 2.45766 -> 2.29709
10: 1.13977 -> 0.959254
12: 0.662498 -> 0.411593
16: 0.353023 -> 0.0873754
24: 0.261552 -> 0.0060971
50: 0.225453 -> ~0.00003 (less than 1 in a million queries are FP)
Fixes https://github.com/facebook/rocksdb/issues/5857
Fixes https://github.com/facebook/rocksdb/issues/4120
Unlike the old implementation, this implementation has a fixed cache line size (64 bytes). At 10 bits per key, the accuracy of this new implementation is very close to the old implementation with 128-byte cache line size. If there's sufficient demand, this implementation could be generalized.
Compatibility
Although old releases would see the new structure as corrupt filter data and read the table as if there's no filter, we've decided only to enable the new Bloom filter with new format_version=5. This provides a smooth path for automatic adoption over time, with an option for early opt-in.
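The early opt-in mentioned above looks roughly like this from application code (a sketch against the public RocksDB C++ API; the db path and bits-per-key value are illustrative):
```cpp
// Sketch of opting in to the new filter via format_version=5 (names per the
// public RocksDB headers).
#include <rocksdb/db.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::BlockBasedTableOptions table_options;
  table_options.format_version = 5;  // opt in to the new Bloom filter format
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10 /* bits/key */));
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/bloom_fv5_demo", &db);
  delete db;
  return s.ok() ? 0 : 1;
}
```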
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6007
Test Plan: filter_bench has been used thoroughly to validate speed, accuracy, and correctness. Unit tests have been carefully updated to exercise new and old implementations, as well as the logic to select an implementation based on context (format_version).
Differential Revision: D18294749
Pulled By: pdillinger
fbshipit-source-id: d44c9db3696e4d0a17caaec47075b7755c262c5f
2019-11-14 00:31:26 +00:00
|
|
|
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_AVX2 -x c++ - -o /dev/null 2>/dev/null <<EOF
|
|
|
|
#include <cstdint>
|
|
|
|
#include <immintrin.h>
|
|
|
|
int main() {
|
|
|
|
const auto a = _mm256_setr_epi32(0, 1, 2, 3, 4, 7, 6, 5);
|
|
|
|
const auto b = _mm256_permutevar8x32_epi32(a, a);
|
|
|
|
(void)b;
|
|
|
|
}
|
|
|
|
EOF
|
|
|
|
if [ "$?" = 0 ]; then
|
|
|
|
COMMON_FLAGS="$COMMON_FLAGS $TRY_AVX2 -DHAVE_AVX2"
|
|
|
|
elif test "$USE_SSE"; then
|
|
|
|
echo "warning: USE_SSE specified but compiler could not use AVX2 intrinsics, disabling" >&2
|
|
|
|
fi
|
|
|
|
|
2019-10-25 00:14:27 +00:00
|
|
|
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
|
|
|
#include <cstdint>
|
|
|
|
int main() {
|
|
|
|
uint64_t a = 0xffffFFFFffffFFFF;
|
|
|
|
__uint128_t b = __uint128_t(a) * a;
|
|
|
|
a = static_cast<uint64_t>(b >> 64);
|
|
|
|
(void)a;
|
|
|
|
}
|
|
|
|
EOF
|
|
|
|
if [ "$?" = 0 ]; then
|
|
|
|
COMMON_FLAGS="$COMMON_FLAGS -DHAVE_UINT128_EXTENSION"
|
|
|
|
fi
|
|
|
|
|
2017-05-15 21:42:32 +00:00
|
|
|
# iOS doesn't support thread-local storage, but this check would erroneously
|
|
|
|
# succeed because the cross-compiler flags are added by the Makefile, not this
|
|
|
|
# script.
|
|
|
|
if [ "$PLATFORM" != IOS ]; then
|
|
|
|
$CXX $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
|
|
|
#if defined(_MSC_VER) && !defined(__thread)
|
|
|
|
#define __thread __declspec(thread)
|
|
|
|
#endif
|
|
|
|
int main() {
|
|
|
|
static __thread int tls;
|
2019-05-15 22:57:04 +00:00
|
|
|
(void)tls;
|
2017-05-15 21:42:32 +00:00
|
|
|
}
|
|
|
|
EOF
|
|
|
|
if [ "$?" = 0 ]; then
|
|
|
|
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SUPPORT_THREAD_LOCAL"
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
2019-06-04 05:59:54 +00:00
|
|
|
if [ "$FBCODE_BUILD" != "true" -a "$PLATFORM" = OS_LINUX ]; then
|
|
|
|
$CXX $COMMON_FLAGS $PLATFORM_SHARED_CFLAGS -x c++ -c - -o test_dl.o 2>/dev/null <<EOF
|
|
|
|
void dummy_func() {}
|
|
|
|
EOF
|
|
|
|
if [ "$?" = 0 ]; then
|
|
|
|
$CXX $COMMON_FLAGS $PLATFORM_SHARED_LDFLAGS test_dl.o -o /dev/null 2>/dev/null
|
|
|
|
if [ "$?" = 0 ]; then
|
|
|
|
EXEC_LDFLAGS+=" -ldl"
|
|
|
|
rm -f test_dl.o
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
2012-03-21 17:28:03 +00:00
|
|
|
PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
|
|
|
|
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
|
|
|
|
|
2013-03-07 19:11:30 +00:00
|
|
|
VALGRIND_VER="$VALGRIND_VER"
|
|
|
|
|
2014-10-02 18:59:22 +00:00
|
|
|
ROCKSDB_MAJOR=`build_tools/version.sh major`
|
|
|
|
ROCKSDB_MINOR=`build_tools/version.sh minor`
|
|
|
|
ROCKSDB_PATCH=`build_tools/version.sh patch`
|
|
|
|
|
2014-05-11 04:01:25 +00:00
|
|
|
echo "CC=$CC" >> "$OUTPUT"
|
|
|
|
echo "CXX=$CXX" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM=$PLATFORM" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> "$OUTPUT"
|
2014-07-22 05:41:54 +00:00
|
|
|
echo "JAVA_LDFLAGS=$JAVA_LDFLAGS" >> "$OUTPUT"
|
2015-10-09 18:41:40 +00:00
|
|
|
echo "JAVA_STATIC_LDFLAGS=$JAVA_STATIC_LDFLAGS" >> "$OUTPUT"
|
2014-05-11 04:01:25 +00:00
|
|
|
echo "VALGRIND_VER=$VALGRIND_VER" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> "$OUTPUT"
|
|
|
|
echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> "$OUTPUT"
|
|
|
|
echo "EXEC_LDFLAGS=$EXEC_LDFLAGS" >> "$OUTPUT"
|
|
|
|
echo "JEMALLOC_INCLUDE=$JEMALLOC_INCLUDE" >> "$OUTPUT"
|
|
|
|
echo "JEMALLOC_LIB=$JEMALLOC_LIB" >> "$OUTPUT"
|
2014-10-02 18:59:22 +00:00
|
|
|
echo "ROCKSDB_MAJOR=$ROCKSDB_MAJOR" >> "$OUTPUT"
|
|
|
|
echo "ROCKSDB_MINOR=$ROCKSDB_MINOR" >> "$OUTPUT"
|
|
|
|
echo "ROCKSDB_PATCH=$ROCKSDB_PATCH" >> "$OUTPUT"
|
2015-02-04 05:43:06 +00:00
|
|
|
echo "CLANG_SCAN_BUILD=$CLANG_SCAN_BUILD" >> "$OUTPUT"
|
|
|
|
echo "CLANG_ANALYZER=$CLANG_ANALYZER" >> "$OUTPUT"
|
2016-04-22 23:49:12 +00:00
|
|
|
echo "PROFILING_FLAGS=$PROFILING_FLAGS" >> "$OUTPUT"
|
2018-03-19 19:11:58 +00:00
|
|
|
echo "FIND=$FIND" >> "$OUTPUT"
|
|
|
|
echo "WATCH=$WATCH" >> "$OUTPUT"
|
2017-08-04 17:27:39 +00:00
|
|
|
# This will enable some related identifiers for the preprocessor
|
2016-04-28 01:25:19 +00:00
|
|
|
if test -n "$JEMALLOC"; then
|
2016-04-27 23:23:33 +00:00
|
|
|
echo "JEMALLOC=1" >> "$OUTPUT"
|
|
|
|
fi
|
2017-08-04 17:27:39 +00:00
|
|
|
# Indicates that jemalloc should be enabled using the -ljemalloc flag
|
|
|
|
# The alternative is to provide a direct link to the library via JEMALLOC_LIB
|
|
|
|
# and JEMALLOC_INCLUDE
|
|
|
|
if test -n "$WITH_JEMALLOC_FLAG"; then
|
|
|
|
echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT"
|
|
|
|
fi
|
2016-11-16 23:27:02 +00:00
|
|
|
echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT"
|
2019-08-07 21:29:35 +00:00
|
|
|
if test -n "$USE_FOLLY_DISTRIBUTED_MUTEX"; then
|
|
|
|
echo "USE_FOLLY_DISTRIBUTED_MUTEX=$USE_FOLLY_DISTRIBUTED_MUTEX" >> "$OUTPUT"
|
|
|
|
fi
|