# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

# Inherit some settings from environment variables, if available

#-----------------------------------------------

CFLAGS += ${EXTRA_CFLAGS}
CXXFLAGS += ${EXTRA_CXXFLAGS}
LDFLAGS += $(EXTRA_LDFLAGS)
MACHINE ?= $(shell uname -m)
ARFLAGS = rs

ifneq ($(MAKECMDGOALS),dbg)
OPT += -O2 -fno-omit-frame-pointer
ifneq ($(MACHINE),ppc64) # ppc64 doesn't support -momit-leaf-frame-pointer
OPT += -momit-leaf-frame-pointer
endif
else
# intentionally left blank
endif

ifeq ($(MAKECMDGOALS),shared_lib)
OPT += -DNDEBUG
endif

ifeq ($(MAKECMDGOALS),static_lib)
OPT += -DNDEBUG
endif
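
# Roughly (editor's note, summarizing the conditionals above): a plain "make"
# builds with -O2 and the frame-pointer tuning, "make dbg" leaves OPT alone so
# no release optimizations or -DNDEBUG are added, and "make static_lib" /
# "make shared_lib" get the release flags plus -DNDEBUG.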

#-----------------------------------------------

include src.mk
AM_DEFAULT_VERBOSITY = 0

AM_V_GEN = $(am__v_GEN_$(V))
am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
am__v_GEN_0 = @echo "  GEN     " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_$(V))
am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
am__v_at_0 = @
am__v_at_1 =
AM_V_CC = $(am__v_CC_$(V))
am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY))
am__v_CC_0 = @echo "  CC      " $@;
am__v_CC_1 =

CCLD = $(CC)
LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@

AM_V_CCLD = $(am__v_CCLD_$(V))
am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY))
am__v_CCLD_0 = @echo "  CCLD    " $@;
am__v_CCLD_1 =
AM_V_AR = $(am__v_AR_$(V))
am__v_AR_ = $(am__v_AR_$(AM_DEFAULT_VERBOSITY))
am__v_AR_0 = @echo "  AR      " $@;
am__v_AR_1 =

AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
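
# Usage note (editor's addition): the AM_V_* indirection above mimics
# automake's silent rules. With the default AM_DEFAULT_VERBOSITY=0, a recipe
# prefixed with $(AM_V_CC)/$(AM_V_CCLD)/$(AM_V_AR)/$(AM_V_GEN) prints a short
# summary such as "  CC       db/builder.o" instead of the full command line.
# Passing V=1 (e.g. "make V=1 all") expands the AM_V_* variables to nothing,
# so the full commands are echoed again; overriding AM_DEFAULT_VERBOSITY on
# the make command line flips the default.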

# detect what platform we're building on
dummy := $(shell (export ROCKSDB_ROOT="$(CURDIR)"; "$(CURDIR)/build_tools/build_detect_platform" "$(CURDIR)/make_config.mk"))
# this file is generated by the previous line to set build flags and sources
include make_config.mk
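
# (Editor's note, inferred from the references later in this Makefile rather
# than from build_detect_platform itself:) make_config.mk is expected to
# define, among others, PLATFORM, PLATFORM_CCFLAGS, PLATFORM_CXXFLAGS,
# PLATFORM_LDFLAGS, the PLATFORM_SHARED_* variables and, when jemalloc is
# available, JEMALLOC_LIB and JEMALLOC_INCLUDE.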

ifneq ($(PLATFORM), IOS)
CFLAGS += -g
CXXFLAGS += -g
else
# no debug info for IOS; it would make our library too big
OPT += -DNDEBUG
endif

ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
# -DROCKSDB_LITE found in OPT: build the lite version without exception support
CFLAGS += -fno-exceptions
CXXFLAGS += -fno-exceptions
endif

# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
ifdef COMPILE_WITH_ASAN
DISABLE_JEMALLOC=1
EXEC_LDFLAGS += -fsanitize=address
PLATFORM_CCFLAGS += -fsanitize=address
PLATFORM_CXXFLAGS += -fsanitize=address
endif

# TSAN doesn't work well with jemalloc. If we're compiling with TSAN, we should use regular malloc.
ifdef COMPILE_WITH_TSAN
DISABLE_JEMALLOC=1
EXEC_LDFLAGS += -fsanitize=thread -pie
PLATFORM_CCFLAGS += -fsanitize=thread -fPIC -DROCKSDB_TSAN_RUN
PLATFORM_CXXFLAGS += -fsanitize=thread -fPIC -DROCKSDB_TSAN_RUN
endif

ifndef DISABLE_JEMALLOC
EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS)
PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE) -DHAVE_JEMALLOC
PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE) -DHAVE_JEMALLOC
endif
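
# Typical sanitizer invocations (editor's sketch; the asan_check and
# asan_crash_test targets further down use the same mechanism):
#   COMPILE_WITH_ASAN=1 make check
#   COMPILE_WITH_TSAN=1 make check
# Either setting also forces DISABLE_JEMALLOC=1; that variable can be set by
# hand as well when jemalloc should be left out of the link.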

export GTEST_THROW_ON_FAILURE=1 GTEST_HAS_EXCEPTIONS=1
GTEST_DIR = ./third-party/gtest-1.7.0/fused-src
PLATFORM_CCFLAGS += -isystem $(GTEST_DIR)
PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)

# This (the first rule) must depend on "all".
default: all

#-------------------------------------------------
# make install related stuff
INSTALL_PATH ?= /usr/local

uninstall:
	rm -rf $(INSTALL_PATH)/include/rocksdb \
	  $(INSTALL_PATH)/lib/$(LIBRARY) \
	  $(INSTALL_PATH)/lib/$(SHARED)

install:
	install -d $(INSTALL_PATH)/lib
	for header_dir in `find "include/rocksdb" -type d`; do \
	  install -d $(INSTALL_PATH)/$$header_dir; \
	done
	for header in `find "include/rocksdb" -type f -name '*.h'`; do \
	  install -C -m 644 $$header $(INSTALL_PATH)/$$header; \
	done
	[ ! -e $(LIBRARY) ] || install -C -m 644 $(LIBRARY) $(INSTALL_PATH)/lib
	[ ! -e $(SHARED) ] || install -C -m 644 $(SHARED) $(INSTALL_PATH)/lib
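
# Example (editor's addition): everything is installed under $(INSTALL_PATH),
# so a non-default prefix can be chosen per invocation, e.g.
#   make install INSTALL_PATH=/opt/rocksdb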

#-------------------------------------------------

WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \
  -Wno-unused-parameter

ifndef DISABLE_WARNING_AS_ERROR
WARNING_FLAGS += -Werror
endif

CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers

LDFLAGS += $(PLATFORM_LDFLAGS)

date := $(shell date +%F)
git_sha := $(shell git describe HEAD 2>/dev/null)
gen_build_version = \
  printf '%s\n' \
    '\#include "build_version.h"' \
    'const char* rocksdb_build_git_sha = \
      "rocksdb_build_git_sha:$(git_sha)";' \
    'const char* rocksdb_build_git_date = \
      "rocksdb_build_git_date:$(date)";' \
    'const char* rocksdb_build_compile_date = __DATE__;'

# Record the version of the source that we are compiling.
# We keep a record of the git revision in this file. It is then built
# as a regular source file as part of the compilation process.
# One can run "strings executable_filename | grep _build_" to find
# the version of the source that we used to build the executable file.
util/build_version.cc:
	$(AM_V_GEN)$(gen_build_version) > $@.tmp
	$(AM_V_at)if test -f $@; then \
	  cmp -s $@.tmp $@ && : || mv -f $@.tmp $@; else mv -f $@.tmp $@; fi
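
# For reference, a sketch (editor's addition) of what the generated file looks
# like, with the sha and date filled in from "git describe" and "date +%F":
#   #include "build_version.h"
#   const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:<git describe output>";
#   const char* rocksdb_build_git_date = "rocksdb_build_git_date:<YYYY-MM-DD>";
#   const char* rocksdb_build_compile_date = __DATE__;
# The cmp/mv dance above only rewrites the file when its contents change, so
# binaries are not relinked on every build.
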
LIBOBJECTS = $(LIB_SOURCES:.cc=.o)
MOCKOBJECTS = $(MOCK_SOURCES:.cc=.o)

GTEST = $(GTEST_DIR)/gtest/gtest-all.o
TESTUTIL = ./util/testutil.o
TESTHARNESS = ./util/testharness.o $(TESTUTIL) $(MOCKOBJECTS) $(GTEST)
BENCHHARNESS = ./util/benchharness.o
VALGRIND_ERROR = 2
VALGRIND_DIR = build_tools/VALGRIND_LOGS
VALGRIND_VER := $(join $(VALGRIND_VER),valgrind)
VALGRIND_OPTS = --error-exitcode=$(VALGRIND_ERROR) --leak-check=full

TESTS = \
	db_test \
	db_iter_test \
	block_hash_index_test \
	autovector_test \
	column_family_test \
	table_properties_collector_test \
	arena_test \
	auto_roll_logger_test \
	benchharness_test \
	block_test \
	bloom_test \
	dynamic_bloom_test \
	c_test \
	cache_test \
	coding_test \
	corruption_test \
	crc32c_test \
	slice_transform_test \
	dbformat_test \
	env_test \
	fault_injection_test \
	filelock_test \
	filename_test \
	block_based_filter_block_test \
	full_filter_block_test \
	histogram_test \
	log_test \
	manual_compaction_test \
	memenv_test \
	mock_env_test \
	merge_test \
	merger_test \
	redis_test \
	reduce_levels_test \
	plain_table_db_test \
	comparator_db_test \
	prefix_test \
	skiplist_test \
	stringappend_test \
	ttl_test \
	backupable_db_test \
	document_db_test \
	json_document_test \
	spatial_db_test \
	version_edit_test \
	version_set_test \
	compaction_picker_test \
	version_builder_test \
	file_indexer_test \
	write_batch_test \
	write_controller_test \
	deletefile_test \
	table_test \
	thread_local_test \
	geodb_test \
	rate_limiter_test \
	options_test \
	event_logger_test \
	cuckoo_table_builder_test \
	cuckoo_table_reader_test \
	cuckoo_table_db_test \
	flush_job_test \
	wal_manager_test \
	listener_test \
	compaction_job_test \
	thread_list_test \
	sst_dump_test \
	compact_files_test

SUBSET := $(shell echo $(TESTS) |sed s/^.*$(ROCKSDBTESTS_START)/$(ROCKSDBTESTS_START)/)
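
# Example (editor's addition; any name from $(TESTS) works): SUBSET lets
# "check_some" start part-way through the test list, e.g.
#   ROCKSDBTESTS_START=env_test make check_some
# runs env_test and every test listed after it.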

TOOLS = \
	sst_dump \
	db_sanity_test \
	db_stress \
	ldb \
	db_repl_stress \
	options_test

PROGRAMS = db_bench signal_test table_reader_bench log_and_apply_bench cache_bench perf_context_test memtablerep_bench $(TOOLS)

# The library name is configurable since we maintain both debug and release
# builds of the library.
ifeq ($(LIBNAME),)
LIBNAME=librocksdb
endif
LIBRARY = ${LIBNAME}.a
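
# Example (editor's addition): the archive name can be overridden per build,
# e.g. "make static_lib LIBNAME=librocksdb_debug" produces librocksdb_debug.a
# instead of librocksdb.a.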

ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)

default: all

#-----------------------------------------------
# Create platform independent shared libraries.
#-----------------------------------------------
ifneq ($(PLATFORM_SHARED_EXT),)

ifneq ($(PLATFORM_SHARED_VERSIONED),true)
SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1)
SHARED3 = $(SHARED1)
SHARED4 = $(SHARED1)
SHARED = $(SHARED1)
else
SHARED_MAJOR = $(ROCKSDB_MAJOR)
SHARED_MINOR = $(ROCKSDB_MINOR)
SHARED_PATCH = $(ROCKSDB_PATCH)
SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1).$(SHARED_MAJOR)
SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
SHARED4 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR).$(SHARED_PATCH)
SHARED = $(SHARED1) $(SHARED2) $(SHARED3) $(SHARED4)
$(SHARED1): $(SHARED4)
	ln -fs $(SHARED4) $(SHARED1)
$(SHARED2): $(SHARED4)
	ln -fs $(SHARED4) $(SHARED2)
$(SHARED3): $(SHARED4)
	ln -fs $(SHARED4) $(SHARED3)
endif
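
# Layout with versioned shared libraries (editor's sketch; X.Y.Z stands for
# ROCKSDB_MAJOR.MINOR.PATCH taken from version.h): librocksdb.so,
# librocksdb.so.X and librocksdb.so.X.Y are all symlinks to
# librocksdb.so.X.Y.Z, the only real file, built by the rule below.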

$(SHARED4):
	$(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED2) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(LIB_SOURCES) $(LDFLAGS) -o $@

endif  # PLATFORM_SHARED_EXT

.PHONY: blackbox_crash_test check clean coverage crash_test ldb_tests package \
	release tags valgrind_check whitebox_crash_test format static_lib shared_lib all \
	dbg rocksdbjavastatic rocksdbjava install uninstall analyze

all: $(LIBRARY) $(PROGRAMS) $(TESTS)

static_lib: $(LIBRARY)

shared_lib: $(SHARED)

dbg: $(LIBRARY) $(PROGRAMS) $(TESTS)

# creates static library and programs
release:
	$(MAKE) clean
	OPT="-DNDEBUG -O2" $(MAKE) static_lib $(PROGRAMS) -j32

coverage:
	$(MAKE) clean
	COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) all check -j32
	(cd coverage; ./coverage_test.sh)
	# Delete intermediate files
	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;

check: $(TESTS) ldb
	for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
	python tools/ldb_test.py

check_some: $(SUBSET) ldb
	for t in $(SUBSET); do echo "***** Running $$t"; ./$$t || exit 1; done
	python tools/ldb_test.py

ldb_tests: ldb
	python tools/ldb_test.py

crash_test: whitebox_crash_test blackbox_crash_test

blackbox_crash_test: db_stress
	python -u tools/db_crashtest.py

whitebox_crash_test: db_stress
	python -u tools/db_crashtest2.py

asan_check:
	$(MAKE) clean
	COMPILE_WITH_ASAN=1 $(MAKE) check -j32
	$(MAKE) clean

asan_crash_test:
	$(MAKE) clean
	COMPILE_WITH_ASAN=1 $(MAKE) crash_test
	$(MAKE) clean

valgrind_check: all $(PROGRAMS) $(TESTS)
	mkdir -p $(VALGRIND_DIR)
	echo TESTS THAT HAVE VALGRIND ERRORS > $(VALGRIND_DIR)/valgrind_failed_tests; \
	echo TIMES in seconds TAKEN BY TESTS ON VALGRIND > $(VALGRIND_DIR)/valgrind_tests_times; \
	for t in $(filter-out skiplist_test,$(TESTS)); do \
	  stime=`date '+%s'`; \
	  $(VALGRIND_VER) $(VALGRIND_OPTS) ./$$t; \
	  if [ $$? -eq $(VALGRIND_ERROR) ] ; then \
	    echo $$t >> $(VALGRIND_DIR)/valgrind_failed_tests; \
	  fi; \
	  etime=`date '+%s'`; \
	  echo $$t $$((etime - stime)) >> $(VALGRIND_DIR)/valgrind_tests_times; \
	done
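
# After a valgrind_check run, the results land under $(VALGRIND_DIR):
# valgrind_failed_tests lists the tests that exited with VALGRIND_ERROR, and
# valgrind_tests_times records each test's wall-clock time in seconds.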

analyze: clean
	$(CLANG_SCAN_BUILD) --use-analyzer=$(CLANG_ANALYZER) \
	  --use-c++=$(CXX) --use-cc=$(CC) --status-bugs \
	  -o $(CURDIR)/scan_build_report \
	  $(MAKE) dbg
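
# CLANG_SCAN_BUILD and CLANG_ANALYZER are assumed to be provided by the
# generated make_config.mk; they can also be given by hand (editor's sketch):
#   CLANG_SCAN_BUILD=scan-build CLANG_ANALYZER=`which clang` make analyze
# The analyzer report ends up in $(CURDIR)/scan_build_report.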

unity.cc:
	rm -f $@ $@-t
	for source_file in $(LIB_SOURCES); do \
	  echo "#include <$$source_file>" >> $@-t; \
	done
	echo 'int main(int argc, char** argv){ return 0; }' >> $@-t
	chmod a=r $@-t
	mv $@-t $@
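
# unity.cc simply #includes every file in LIB_SOURCES plus a trivial main(),
# so building the "unity" target below compiles and links the whole library
# as a single translation unit, which serves as a quick sanity check of the
# hard-coded source list in src.mk.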

unity: unity.o
	$(AM_LINK)

clean:
	rm -f $(PROGRAMS) $(TESTS) $(LIBRARY) $(SHARED) make_config.mk unity.cc
	rm -rf ios-x86 ios-arm scan_build_report
	find . -name "*.[oda]" -exec rm {} \;
	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
	rm -rf bzip2* snappy* zlib*

tags:
	ctags * -R
	cscope -b `find . -name '*.cc'` `find . -name '*.h'`

format:
	build_tools/format-diff.sh

package:
	bash build_tools/make_package.sh $(SHARED_MAJOR).$(SHARED_MINOR)

# ---------------------------------------------------------------------------
#  Unit tests and tools
# ---------------------------------------------------------------------------
$(LIBRARY): $(LIBOBJECTS)
	$(AM_V_AR)rm -f $@
	$(AM_V_at)$(AR) $(ARFLAGS) $@ $(LIBOBJECTS)

db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

cache_bench: util/cache_bench.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

memtablerep_bench: db/memtablerep_bench.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

block_hash_index_test: table/block_hash_index_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

db_stress: tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

db_sanity_test: tools/db_sanity_test.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

db_repl_stress: tools/db_repl_stress.o $(LIBOBJECTS) $(TESTUTIL)
	$(AM_LINK)

signal_test: util/signal_test.o $(LIBOBJECTS)
	$(AM_LINK)

arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

autovector_test: util/autovector_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

column_family_test: db/column_family_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

table_properties_collector_test: db/table_properties_collector_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

dynamic_bloom_test: util/dynamic_bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

stringappend_test: utilities/merge_operators/string_append/stringappend_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

redis_test: utilities/redis/redis_lists_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

benchharness_test: util/benchharness_test.o $(LIBOBJECTS) $(TESTHARNESS) $(BENCHHARNESS)
	$(AM_LINK)

histogram_test: util/histogram_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

thread_local_test: util/thread_local_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

slice_transform_test: util/slice_transform_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

db_iter_test: db/db_iter_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

log_write_bench: util/log_write_bench.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK) -pg

plain_table_db_test: db/plain_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)

comparator_db_test: db/comparator_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(AM_LINK)
|
2014-10-29 20:49:45 +00:00
|
|
|
|
2014-04-21 20:01:50 +00:00
|
|
|
table_reader_bench: table/table_reader_bench.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK) -pg
|
2013-10-31 20:38:54 +00:00
|
|
|
|
2014-05-05 18:11:48 +00:00
|
|
|
log_and_apply_bench: db/log_and_apply_bench.o $(LIBOBJECTS) $(TESTHARNESS) $(BENCHHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK) -pg
|
2014-05-05 18:11:48 +00:00
|
|
|
|
2013-08-13 06:59:04 +00:00
|
|
|
perf_context_test: db/perf_context_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
build: make "make" output readable by default
Summary:
With this change, make now prints a summary line for each
compiler and linker invocation, e.g.,:
CC db/builder.o
CC db/c.o
CC db/column_family.o
To see full commands, insert "V=1" into your make command.
E.g., run "make V=1 all" if you want it to print each command
in its full glory.
$^ is GNU make's abbreviation for the prerequisites of the current target.
These AM_V_... variables expand to some very short string like "CC" or
"LD", by default, so that the output of "make" is readable. If/when you
want more details, just build with "make V=1 ...", and make will print
each full command as it is executed. If you prefer to see the noise
all the time, and only want to optionally see the abbreviated output,
set AM_DEFAULT_VERBOSITY=1 in your environment, and then build with
V=0 to see the abbreviated command indicators.
Test Plan:
invoke make a few different ways and observe:
make clean; make # abbreviated
make clean; make V=0 # also abbreviated
make clean; make V=1 # full detail
Reviewers: sdong, ljin, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D33579
2015-02-18 19:42:54 +00:00
|
|
|
$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
|
2013-08-13 06:59:04 +00:00
|
|
|
|
2013-10-23 04:59:44 +00:00
|
|
|
prefix_test: db/prefix_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
|
|
|
$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
|
2013-10-23 04:59:44 +00:00
|
|
|
|
[RocksDB] BackupableDB
Summary:
In this diff I present you BackupableDB v1. You can easily use it to backup your DB and it will do incremental snapshots for you.
Let's first describe how you would use BackupableDB. It inherits the StackableDB interface, so you can easily construct it with your DB object -- it will add a method RollTheSnapshot() to the DB object. When you call RollTheSnapshot(), the current snapshot of the DB will be stored in the backup dir. To restore, you can just call RestoreDBFromBackup() on a BackupableDB (which is a static method) and it will restore all files from the backup dir. In the next version, it will even support automatic backups every X minutes.
There are multiple things you can configure:
1. backup_env and db_env can be different, which is awesome because then you can easily backup to HDFS or wherever you feel like.
2. sync - if true, it *guarantees* backup consistency on machine reboot
3. number of snapshots to keep - this will keep the last N snapshots around if you want, for some reason, to be able to restore from an earlier snapshot. All the backing up is done in an incremental fashion - if we already have 00010.sst, we will not copy it again. *IMPORTANT* -- This is based on the assumption that 00010.sst never changes - two files named 00010.sst from the same DB will always be exactly the same. Is this true? I always copy the manifest, current and log files.
4. You can decide if you want to flush the memtables before you backup, or you're fine with backing up the log files -- either way, you get a complete and consistent view of the database at the time of backup.
5. More things you can find in BackupableDBOptions
Here is the directory structure I use:
backup_dir/CURRENT_SNAPSHOT - just 4 bytes holding the latest snapshot
0, 1, 2, ... - files containing serialized version of each snapshot - containing a list of files
files/*.sst - sst files shared between snapshots - if one snapshot references 00010.sst and another one needs to back it up from the DB, it will just reference the same file
files/ 0/, 1/, 2/, ... - snapshot directories containing private snapshot files - current, manifest and log files
All the files are ref counted and deleted immediately when they get out of scope.
Some other stuff in this diff:
1. Added a GetEnv() method to the DB. Discussed with @haobo and we agreed that it seems the right thing to do.
2. Fixed StackableDB interface. The way it was set up before, I was not able to implement BackupableDB.
Test Plan:
I have a unit test, but please don't look at this yet. I just hacked it up to help me with debugging. I will write a lot of good tests and update the diff.
Also, `make asan_check`
Reviewers: dhruba, haobo, emayanke
Reviewed By: dhruba
CC: leveldb, haobo
Differential Revision: https://reviews.facebook.net/D14295
2013-12-09 22:06:52 +00:00
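The summary above describes the intended usage only in prose; here is a minimal C++ sketch of that flow. RollTheSnapshot() and RestoreDBFromBackup() are the names given in the summary, but the header path, the BackupableDBOptions constructor and the exact signatures are assumptions made for illustration only.

    // Hypothetical usage sketch based on the summary above; signatures assumed.
    #include "rocksdb/db.h"
    #include "utilities/backupable_db.h"  // assumed header location

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* base = nullptr;
      rocksdb::DB::Open(options, "/tmp/db", &base);

      // Wrap the DB; backups go to a (possibly different) Env and directory.
      rocksdb::BackupableDBOptions backup_options("/tmp/db_backups");  // assumed ctor
      rocksdb::BackupableDB* db = new rocksdb::BackupableDB(base, backup_options);

      db->Put(rocksdb::WriteOptions(), "key", "value");
      db->RollTheSnapshot();  // store an incremental snapshot in the backup dir

      delete db;  // also closes the wrapped DB

      // Restore all files from the backup dir into a fresh DB path.
      rocksdb::BackupableDB::RestoreDBFromBackup(backup_options, "/tmp/db_restored");  // assumed static signature
      return 0;
    }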
|
|
|
backupable_db_test: utilities/backupable/backupable_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2014-07-10 16:31:42 +00:00
|
|
|
document_db_test: utilities/document/document_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-07-10 16:31:42 +00:00
|
|
|
|
2014-06-20 09:14:14 +00:00
|
|
|
json_document_test: utilities/document/json_document_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-06-20 09:14:14 +00:00
|
|
|
|
SpatialDB
Summary:
This diff is adding spatial index support to RocksDB.
When creating the DB, the user specifies a list of spatial indexes. Spatial indexes can cover different areas and have different resolutions (i.e. number of tiles). This is useful for supporting different zoom levels.
Each element inserted into SpatialDB has:
* a bounding box, which determines how the element will be indexed
* a string blob, which will usually be the WKB representation of the polygon (http://en.wikipedia.org/wiki/Well-known_text)
* feature set, which is a map of key-value pairs, where value can be int, double, bool, null or a string. FeatureSet will be a set of tags associated with geo elements (for example, 'road': 'highway' and similar)
* a list of indexes to insert the element in. For example, a small river element will be inserted in the index for the high zoom level, while a country border will be inserted in all indexes (including the index for the low zoom level).
Each query is executed on a single spatial index. A query guarantees that it will return all elements intersecting the specified bounding box, but it might also return some extra non-intersecting elements.
Test Plan: Added a bunch of unit tests in spatial_db_test
Reviewers: dhruba, yinwang
Reviewed By: yinwang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D20361
2014-07-23 18:22:58 +00:00
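Since the summary walks through the element model and the query contract, a short illustrative C++ sketch may help. The structure (bounding box, blob, feature set, index list, single-index query) follows the summary; the concrete class and method names used below are assumptions, not necessarily the committed interface.

    // Illustrative sketch only; names beyond those in the summary are assumed.
    #include <string>
    #include <vector>
    #include "rocksdb/options.h"
    #include "utilities/spatial_db.h"  // assumed header location

    using namespace rocksdb::spatial;  // assumed namespace

    void Example(SpatialDB* db) {
      // Each element: bounding box, blob (usually WKB), feature set, and the
      // list of spatial indexes it should be inserted into.
      FeatureSet features;
      features.Set("road", "highway");
      db->Insert(rocksdb::WriteOptions(),
                 BoundingBox<double>(1.0, 1.0, 5.0, 5.0),
                 "<wkb blob>", features,
                 {"high_zoom", "low_zoom"});

      // A query runs against a single index and may also return some extra
      // non-intersecting elements, which the caller has to filter out.
      Cursor* cursor = db->Query(rocksdb::ReadOptions(),
                                 BoundingBox<double>(2.0, 2.0, 3.0, 3.0),
                                 "high_zoom");
      for (; cursor->Valid(); cursor->Next()) {
        // cursor->blob(), cursor->feature_set(), ...
      }
      delete cursor;
    }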
|
|
|
spatial_db_test: utilities/spatialdb/spatial_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
Timestamp and TTL Wrapper for rocksdb
Summary:
When opened with the DBTimestamp::Open call, timestamps are prepended to and stripped from the value during subsequent Put and Get calls, respectively. The timestamp is used in Get and in a custom compaction filter to discard values which have exceeded their TTL, which is specified during Open.
Have made a temporary change to the Makefile to let us test with the temporary file TestTime.cc. Have also changed the private members of db_impl.h to protected to let them be inherited by the new class DBTimestamp.
Test Plan: make db_timestamp; TestTime.cc (will not check it in) shows how to use the APIs currently, but I will write unit tests shortly
Reviewers: dhruba, vamsi, haobo, sheki, heyongqiang, vkrest
Reviewed By: vamsi
CC: zshao, xjin, vkrest, MarkCallaghan
Differential Revision: https://reviews.facebook.net/D10311
2013-04-15 20:33:13 +00:00
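A minimal C++ sketch of the wrapper behaviour described above. DBTimestamp::Open is the name used in the summary; the header, the position of the TTL parameter and the rest of the signature are assumptions for illustration.

    // Sketch only; DBTimestamp comes from the summary, everything else is assumed.
    #include <string>
    #include "rocksdb/db.h"
    #include "utilities/db_timestamp.h"  // assumed header location

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;

      DBTimestamp* db = nullptr;
      DBTimestamp::Open(options, "/tmp/ttl_db", &db, /*ttl_seconds=*/3600);  // assumed signature

      // Put/Get look like plain DB calls; a timestamp is prepended on Put and
      // stripped on Get, and values past their TTL are discarded.
      db->Put(rocksdb::WriteOptions(), "key", "value");
      std::string value;
      rocksdb::Status s = db->Get(rocksdb::ReadOptions(), "key", &value);

      delete db;
      return 0;
    }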
|
|
|
ttl_test: utilities/ttl/ttl_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2014-08-18 22:19:17 +00:00
|
|
|
write_batch_with_index_test: utilities/write_batch_with_index/write_batch_with_index_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2014-10-28 18:54:33 +00:00
|
|
|
flush_job_test: db/flush_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-10-28 18:54:33 +00:00
|
|
|
|
2014-11-14 19:35:48 +00:00
|
|
|
compaction_job_test: db/compaction_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2014-10-30 00:43:37 +00:00
|
|
|
wal_manager_test: db/wal_manager_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-10-30 00:43:37 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-01-15 18:28:10 +00:00
|
|
|
fault_injection_test: db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2015-01-15 18:28:10 +00:00
|
|
|
|
generic rate limiter
Summary:
A generic rate limiter that can be shared by threads and rocksdb
instances. Will use this to smooth out write traffic generated by
compaction and flush. This will help us get better p99 behavior on flash
storage.
Test Plan:
unit test output
==== Test RateLimiterTest.Rate
request size [1 - 1023], limit 10 KB/sec, actual rate: 10.374969 KB/sec, elapsed 2002265
request size [1 - 2047], limit 20 KB/sec, actual rate: 20.771242 KB/sec, elapsed 2002139
request size [1 - 4095], limit 40 KB/sec, actual rate: 41.285299 KB/sec, elapsed 2202424
request size [1 - 8191], limit 80 KB/sec, actual rate: 81.371605 KB/sec, elapsed 2402558
request size [1 - 16383], limit 160 KB/sec, actual rate: 162.541268 KB/sec, elapsed 3303500
Reviewers: yhchiang, igor, sdong
Reviewed By: sdong
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D19359
2014-07-08 18:41:57 +00:00
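To make the sharing aspect concrete, here is a small C++ sketch of attaching one limiter to several DB instances. It assumes the public NewGenericRateLimiter factory and the Options::rate_limiter field that expose this limiter; if either is absent in a given version, treat this purely as pseudocode.

    // Minimal sketch: one rate limiter shared by two DB instances.
    #include <memory>
    #include "rocksdb/db.h"
    #include "rocksdb/rate_limiter.h"

    int main() {
      // Cap the total flush/compaction write rate at 10 MB/s for both DBs.
      std::shared_ptr<rocksdb::RateLimiter> limiter(
          rocksdb::NewGenericRateLimiter(10 * 1024 * 1024));

      rocksdb::Options options;
      options.create_if_missing = true;
      options.rate_limiter = limiter;

      rocksdb::DB* db1 = nullptr;
      rocksdb::DB* db2 = nullptr;
      rocksdb::DB::Open(options, "/tmp/db1", &db1);
      rocksdb::DB::Open(options, "/tmp/db2", &db2);  // same limiter, shared budget

      // ... compaction and flush writes from both instances now draw from the
      // same 10 MB/s budget, smoothing out the combined write traffic ...

      delete db1;
      delete db2;
      return 0;
    }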
|
|
|
rate_limiter_test: util/rate_limiter_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-09-08 17:37:05 +00:00
|
|
|
block_based_filter_block_test: table/block_based_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-09-08 17:37:05 +00:00
|
|
|
|
|
|
|
full_filter_block_test: table/full_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-04-17 15:36:46 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-12-20 19:05:41 +00:00
|
|
|
block_test: table/block_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-12-20 19:05:41 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
compaction_picker_test: db/compaction_picker_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-10-27 22:49:46 +00:00
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
version_builder_test: db/version_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-10-31 15:48:19 +00:00
|
|
|
|
|
|
|
file_indexer_test: db/file_indexer_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
hints for narrowing down FindFile range and avoiding checking irrelevant L0 files
Summary:
The file tree structure in Version is prebuilt and the range of each file is known.
On the Get() code path, we do binary search in FindFile() by comparing
target key with each file's largest key and also check the range for each L0 file.
With some pre-calculated knowledge, each key comparison that has been done can serve
as a hint to narrow down further searches:
(1) If a key falls within a L0 file's range, we can safely skip the next
file if its range does not overlap with the current one.
(2) If a key falls within a file's range in level L0 - Ln-1, we should only
need to binary search in the next level for files that overlap with the current one.
(1) will be able to skip some files depending on the key distribution.
(2) can greatly reduce the range of binary search, especially for bottom
levels, given that one file most likely only overlaps with N files from
the level below (where N is max_bytes_for_level_multiplier). So on level
L, we will only look at ~N files instead of N^L files.
Some initial results: measured with a 500M key DB, when the write load is light (10k/s = 1.2M/s), this
improves QPS ~7% on top of blocked bloom. When the write load is heavier (80k/s =
9.6M/s), it gives us ~13% improvement.
Test Plan: make all check
Reviewers: haobo, igor, dhruba, sdong, yhchiang
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D17205
2014-04-21 16:10:12 +00:00
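The two hints above are easier to see in code. The following is a conceptual C++ sketch of hint (2), not the actual FileIndexer interface: each file stores the precomputed index range of overlapping files on the next level, and that range bounds the next binary search.

    // Conceptual sketch of the narrowed binary search; structures are invented
    // for illustration and do not match the real Version/FileIndexer classes.
    #include <algorithm>
    #include <string>
    #include <vector>

    struct FileMeta {
      std::string smallest, largest;
      // Precomputed when the Version is built: range [next_left, next_right)
      // of files on the next level that overlap this file.
      size_t next_left = 0, next_right = 0;
    };

    // Binary search restricted to [left, right); returns the index of the first
    // file whose largest key is >= key, or right if there is none.
    size_t FindFile(const std::vector<FileMeta>& files, size_t left, size_t right,
                    const std::string& key) {
      auto it = std::lower_bound(files.begin() + left, files.begin() + right, key,
                                 [](const FileMeta& f, const std::string& k) {
                                   return f.largest < k;
                                 });
      return static_cast<size_t>(it - files.begin());
    }

    void Get(const std::vector<std::vector<FileMeta>>& levels,
             const std::string& key) {
      if (levels.empty()) return;
      size_t left = 0, right = levels[0].size();
      for (size_t l = 0; l < levels.size(); ++l) {
        size_t idx = FindFile(levels[l], left, right, key);
        if (idx >= right) return;  // key is past every candidate on this level
        // ... probe levels[l][idx] (and, on L0, its neighbours) for the key ...
        left = levels[l][idx].next_left;    // hint: only overlapping files on
        right = levels[l][idx].next_right;  // the next level need to be searched
      }
    }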
|
|
|
|
2014-04-15 18:28:52 +00:00
|
|
|
reduce_levels_test: tools/reduce_levels_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-10-31 18:47:18 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2013-03-21 22:59:47 +00:00
|
|
|
|
Push- instead of pull-model for managing Write stalls
Summary:
Introducing WriteController, which is a source of truth about per-DB write delays. Let's define a DB epoch as a period where there are no flushes and compactions (i.e. a new epoch is started when a flush or compaction finishes). Each epoch can either:
* proceed with all writes without delay
* delay all writes by a fixed time
* stop all writes
The three modes are recomputed at each epoch change (flush, compaction), rather than on every write (which is currently the case).
When we have a lot of column families, our current pull behavior adds a big overhead, since we need to loop over every column family for every write. With the new push model, the overhead on the Write code path is minimal.
This is just the start. The next step is to also take care of stalls introduced by slow memtable flushes. The final goal is to eliminate the function MakeRoomForWrite(), which currently needs to be called for every column family by every write.
Test Plan: make check for now. I'll add some unit tests later. Also, perf test.
Reviewers: dhruba, yhchiang, MarkCallaghan, sdong, ljin
Reviewed By: ljin
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D22791
2014-09-08 18:20:25 +00:00
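The epoch idea translates into a very small amount of state on the write path. Below is a conceptual C++ sketch of such a controller, not the real WriteController class: the background path recomputes the mode at each epoch change, and the write path only reads it.

    // Conceptual sketch; the real WriteController in db/ differs in detail.
    #include <atomic>
    #include <chrono>
    #include <thread>

    class WriteControllerSketch {
     public:
      enum class Mode { kProceed, kDelayed, kStopped };

      // Called by flush/compaction at each epoch change.
      void SetMode(Mode mode, std::chrono::microseconds delay = {}) {
        delay_us_.store(delay.count());
        mode_.store(static_cast<int>(mode));
      }

      // Called on every write: two atomic loads, no per-column-family loop.
      void WaitIfNeeded() {
        while (static_cast<Mode>(mode_.load()) == Mode::kStopped) {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
        if (static_cast<Mode>(mode_.load()) == Mode::kDelayed) {
          std::this_thread::sleep_for(std::chrono::microseconds(delay_us_.load()));
        }
      }

     private:
      std::atomic<int> mode_{static_cast<int>(Mode::kProceed)};
      std::atomic<long long> delay_us_{0};
    };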
|
|
|
write_controller_test: db/write_controller_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2013-07-29 20:26:38 +00:00
|
|
|
merge_test: db/merge_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-09-09 05:24:40 +00:00
|
|
|
merger_test: table/merger_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-09-09 05:24:40 +00:00
|
|
|
|
2013-08-22 21:32:53 +00:00
|
|
|
deletefile_test: db/deletefile_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2013-08-22 21:32:53 +00:00
|
|
|
|
2013-12-21 01:17:00 +00:00
|
|
|
geodb_test: utilities/geodb/geodb_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2013-12-21 01:17:00 +00:00
|
|
|
|
2014-07-21 20:26:09 +00:00
|
|
|
cuckoo_table_builder_test: table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-07-21 20:26:09 +00:00
|
|
|
|
2014-07-25 23:37:32 +00:00
|
|
|
cuckoo_table_reader_test: table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) $(BENCHHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-07-25 23:37:32 +00:00
|
|
|
|
2014-08-12 03:21:07 +00:00
|
|
|
cuckoo_table_db_test: db/cuckoo_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-08-12 03:21:07 +00:00
|
|
|
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This API allows users to obtain the current state of a RocksDB instance for one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happen.
* To register an EventListener, simply add it to ColumnFamilyOptions::listeners
= CompactFiles =
* The CompactFiles API takes a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 22:45:18 +00:00
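A short C++ sketch of the CompactFiles flow described above (the in-tree example/compact_files_example.cc is the authoritative version). The metadata field names used here are assumptions based on the summary.

    // Sketch: compact every current L0 file of the default column family into L1.
    #include <string>
    #include <vector>
    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    void CompactL0ToL1(rocksdb::DB* db) {
      // 1. Ask the DB for the current file layout of the default column family.
      rocksdb::ColumnFamilyMetaData meta;
      db->GetColumnFamilyMetaData(&meta);

      // 2. Collect the names of all level-0 files (field names assumed).
      std::vector<std::string> input_files;
      for (const auto& file : meta.levels[0].files) {
        input_files.push_back(file.name);
      }

      // 3. Ask RocksDB to compact exactly those files into level 1.
      if (!input_files.empty()) {
        db->CompactFiles(rocksdb::CompactionOptions(), input_files,
                         /*output_level=*/1);
      }
    }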
|
|
|
listener_test: db/listener_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2014-11-20 18:49:32 +00:00
|
|
|
thread_list_test: util/thread_list_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-11-20 18:49:32 +00:00
|
|
|
|
|
|
|
compactor_test: utilities/compaction/compactor_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
|
|
|
|
2015-03-11 20:06:59 +00:00
|
|
|
compact_files_test: db/compact_files_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
|
|
|
$(AM_LINK)
|
|
|
|
|
2015-02-18 19:42:18 +00:00
|
|
|
options_test: util/options_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-05-16 17:35:41 +00:00
|
|
|
|
EventLogger
Summary:
Here's my proposal for making our LOGs easier to read by machines.
The idea is to dump all events as JSON objects. JSON is easy to read by humans, but more importantly, it's easy to read by machines. That way, we can parse it, load it into SQLite/mongo and then query or visualize it.
I started with table_create and table_delete events, but if everybody agrees, I'll continue by adding more events (flush/compaction/etc etc)
Test Plan:
Ran db_bench. Observed:
2015/01/15-14:13:25.788019 1105ef000 EVENT_LOG_v1 {"time_micros": 1421360005788015, "event": "table_file_creation", "file_number": 12, "file_size": 1909699}
2015/01/15-14:13:25.956500 110740000 EVENT_LOG_v1 {"time_micros": 1421360005956498, "event": "table_file_deletion", "file_number": 12}
Reviewers: yhchiang, rven, dhruba, MarkCallaghan, lgalanis, sdong
Reviewed By: sdong
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31647
2015-03-13 17:15:54 +00:00
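Because every EVENT_LOG_v1 line carries a single JSON object after the marker, consuming the LOG is a one-pass scan. A small self-contained C++ sketch (the LOG path is just an example):

    // Extract the JSON payload from EVENT_LOG_v1 lines and print it; the JSON
    // string can then be handed to any JSON parser or loaded into SQLite/mongo.
    #include <fstream>
    #include <iostream>
    #include <string>

    int main() {
      std::ifstream log("/tmp/rocksdb/LOG");  // example path
      const std::string marker = "EVENT_LOG_v1 ";
      std::string line;
      while (std::getline(log, line)) {
        std::string::size_type pos = line.find(marker);
        if (pos == std::string::npos) continue;
        std::cout << line.substr(pos + marker.size()) << "\n";
        // e.g. {"time_micros": ..., "event": "table_file_creation", ...}
      }
      return 0;
    }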
|
|
|
event_logger_test: util/event_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
|
|
|
$(AM_LINK)
|
|
|
|
|
2014-12-23 21:24:07 +00:00
|
|
|
sst_dump_test: util/sst_dump_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-12-23 21:24:07 +00:00
|
|
|
|
2015-01-30 00:33:11 +00:00
|
|
|
memenv_test : util/memenv_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2011-09-12 09:21:10 +00:00
|
|
|
|
2014-10-31 22:08:10 +00:00
|
|
|
mock_env_test : util/mock_env_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2014-10-31 22:08:10 +00:00
|
|
|
|
2013-06-17 20:58:17 +00:00
|
|
|
manual_compaction_test: util/manual_compaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2013-06-17 20:58:17 +00:00
|
|
|
|
2012-08-18 07:26:50 +00:00
|
|
|
filelock_test: util/filelock_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2013-02-05 03:42:40 +00:00
|
|
|
|
|
|
|
auto_roll_logger_test: util/auto_roll_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-08-18 07:26:50 +00:00
|
|
|
|
2014-04-15 18:28:52 +00:00
|
|
|
sst_dump: tools/sst_dump.o $(LIBOBJECTS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-08-17 17:48:40 +00:00
|
|
|
|
2014-04-15 18:29:02 +00:00
|
|
|
ldb: tools/ldb.o $(LIBOBJECTS)
|
2015-02-18 19:43:30 +00:00
|
|
|
$(AM_LINK)
|
2012-08-18 07:26:50 +00:00
|
|
|
|
Add a jni library for rocksdb which supports Open, Get, Put, and Close.
Summary:
This diff contains a simple jni library for rocksdb which supports open, get,
put and close using default options (including Options, ReadOptions, and
WriteOptions.) In the usual case, Java developers can use the c++ rocksdb
library in a way similar to the following:
RocksDB db = RocksDB.open(path_to_db);
...
db.put("hello".getBytes(), "world".getBytes();
byte[] value = db.get("hello".getBytes());
...
db.close();
Specifically, this diff has the following major classes:
* RocksDB: a Java wrapper class which forwards the operations
from the java side to c++ rocksdb library.
* RocksDBException: encapsulates the error of an operation.
This exception type is used to describe an internal error from
the c++ rocksdb library.
This diff also includes simple Java sample code calling the c++ rocksdb library.
To build the rocksdb jni library, simply run make jni, and make jtest will try to
build and run the sample code.
Note that if rocksdb is not built with the default glibc that Java uses,
Java will try to load the wrong glibc at run time. As a result,
the sample code might not work properly at run time.
Test Plan:
* make jni
* make jtest
Reviewers: haobo, dhruba, sdong, igor, ljin
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D17109
2014-03-28 21:19:21 +00:00
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
# Jni stuff
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
2014-04-18 17:47:03 +00:00
|
|
|
JNI_NATIVE_SOURCES = ./java/rocksjni/*.cc
|
2014-05-04 20:15:33 +00:00
|
|
|
JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/linux
|
2014-09-26 20:57:12 +00:00
|
|
|
ARCH := $(shell getconf LONG_BIT)
|
|
|
|
ROCKSDBJNILIB = librocksdbjni-linux$(ARCH).so
|
2014-10-02 18:07:45 +00:00
|
|
|
ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar
|
2014-10-02 20:46:43 +00:00
|
|
|
ROCKSDB_JAR_ALL = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
|
2014-10-02 21:13:09 +00:00
|
|
|
ROCKSDB_JAVADOCS_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar
|
2014-10-02 20:57:54 +00:00
|
|
|
ROCKSDB_SOURCES_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar
|
|
|
|
|
|
|
|
ifeq ($(PLATFORM), OS_MACOSX)
|
2014-09-26 20:57:12 +00:00
|
|
|
ROCKSDBJNILIB = librocksdbjni-osx.jnilib
|
2014-10-02 18:07:45 +00:00
|
|
|
ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar
|
2014-10-23 14:39:48 +00:00
|
|
|
ifneq ("$(wildcard $(JAVA_HOME)/include/darwin)","")
|
|
|
|
JAVA_INCLUDE = -I$(JAVA_HOME)/include -I $(JAVA_HOME)/include/darwin
|
|
|
|
else
|
|
|
|
JAVA_INCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers/
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
2014-09-09 00:44:52 +00:00
|
|
|
libz.a:
|
|
|
|
-rm -rf zlib-1.2.8
|
2014-08-18 21:03:46 +00:00
|
|
|
curl -O http://zlib.net/zlib-1.2.8.tar.gz
|
|
|
|
tar xvzf zlib-1.2.8.tar.gz
|
|
|
|
cd zlib-1.2.8 && CFLAGS='-fPIC' ./configure --static && make
|
2014-09-29 18:09:09 +00:00
|
|
|
cp zlib-1.2.8/libz.a .
|
2014-09-09 00:44:52 +00:00
|
|
|
|
|
|
|
libbz2.a:
|
|
|
|
-rm -rf bzip2-1.0.6
|
2014-09-29 18:09:09 +00:00
|
|
|
curl -O http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz
|
2014-08-18 21:03:46 +00:00
|
|
|
tar xvzf bzip2-1.0.6.tar.gz
|
|
|
|
cd bzip2-1.0.6 && make CFLAGS='-fPIC -Wall -Winline -O2 -g -D_FILE_OFFSET_BITS=64'
|
|
|
|
cp bzip2-1.0.6/libbz2.a .
|
|
|
|
|
2014-09-09 00:44:52 +00:00
|
|
|
libsnappy.a:
|
|
|
|
-rm -rf snappy-1.1.1
|
2014-08-18 21:03:46 +00:00
|
|
|
curl -O https://snappy.googlecode.com/files/snappy-1.1.1.tar.gz
|
|
|
|
tar xvzf snappy-1.1.1.tar.gz
|
2014-09-09 00:44:52 +00:00
|
|
|
cd snappy-1.1.1 && ./configure --with-pic --enable-static
|
2014-08-18 21:03:46 +00:00
|
|
|
cd snappy-1.1.1 && make
|
|
|
|
cp snappy-1.1.1/.libs/libsnappy.a .
|
2014-09-29 18:09:09 +00:00
|
|
|
|
2014-09-09 00:44:52 +00:00
|
|
|
|
|
|
|
rocksdbjavastatic: libz.a libbz2.a libsnappy.a
|
2014-08-18 21:03:46 +00:00
|
|
|
OPT="-fPIC -DNDEBUG -O2" $(MAKE) $(LIBRARY) -j
|
2014-11-19 20:21:21 +00:00
|
|
|
cd java;$(MAKE) javalib;
|
2015-01-31 22:23:59 +00:00
|
|
|
rm -f ./java/target/$(ROCKSDBJNILIB)
|
|
|
|
$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(LIBOBJECTS) $(COVERAGEFLAGS) libz.a libbz2.a libsnappy.a
|
|
|
|
cd java/target;strip -S -x $(ROCKSDBJNILIB)
|
|
|
|
cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
|
|
|
|
cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
|
|
|
|
cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
|
|
|
|
cd java/target/apidocs;jar -cf ../$(ROCKSDB_JAVADOCS_JAR) *
|
|
|
|
cd java/src/main/java;jar -cf ../../../target/$(ROCKSDB_SOURCES_JAR) org
|
2014-08-18 21:03:46 +00:00
|
|
|
|
2014-09-26 20:57:12 +00:00
|
|
|
rocksdbjavastaticrelease: rocksdbjavastatic
|
2014-09-30 19:03:32 +00:00
|
|
|
cd java/crossbuild && vagrant destroy -f && vagrant up linux32 && vagrant halt linux32 && vagrant up linux64 && vagrant halt linux64
|
2015-01-31 22:23:59 +00:00
|
|
|
cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md
|
|
|
|
cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib
|
|
|
|
cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class
|
2014-09-26 20:57:12 +00:00
|
|
|
|
2014-10-06 15:20:56 +00:00
|
|
|
rocksdbjavastaticpublish: rocksdbjavastaticrelease
|
2015-01-31 22:23:59 +00:00
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar -Dclassifier=javadoc
|
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar -Dclassifier=sources
|
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux64.jar -Dclassifier=linux64
|
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux32.jar -Dclassifier=linux32
|
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar -Dclassifier=osx
|
|
|
|
mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
|
2014-10-02 20:29:47 +00:00
|
|
|
|
2014-06-22 20:27:22 +00:00
|
|
|
rocksdbjava:
|
|
|
|
OPT="-fPIC -DNDEBUG -O2" $(MAKE) $(LIBRARY) -j32
|
2014-11-19 20:21:21 +00:00
|
|
|
cd java;$(MAKE) javalib;
|
2015-01-31 22:23:59 +00:00
|
|
|
rm -f ./java/target/$(ROCKSDBJNILIB)
|
|
|
|
$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(LIBOBJECTS) $(JAVA_LDFLAGS) $(COVERAGEFLAGS)
|
|
|
|
cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
|
|
|
|
cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
|
|
|
|
cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
|
|
|
|
|
|
|
|
jclean:
|
|
|
|
cd java;$(MAKE) clean;
|
|
|
|
|
|
|
|
jtest:
|
2014-04-02 20:14:55 +00:00
|
|
|
cd java;$(MAKE) sample;$(MAKE) test;
|
|
|
|
|
2014-04-09 07:48:20 +00:00
|
|
|
jdb_bench:
|
|
|
|
cd java;$(MAKE) db_bench;
|
|
|
|
|
A build option to run through all check-in requirements.
Summary: Make it easier for people to run all the tests.
Test Plan: Run it.
Reviewers: rven, yhchiang, igor, MarkCallaghan, IslamAbdelRahman, igor.sugak, anthony, kradhakrishnan, meyering
Reviewed By: meyering
Subscribers: meyering, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35319
2015-03-19 00:26:29 +00:00
|
|
|
commit-prereq:
|
|
|
|
$(MAKE) clean && $(MAKE) all check;
|
|
|
|
$(MAKE) clean && $(MAKE) rocksdbjava;
|
|
|
|
$(MAKE) clean && USE_CLANG=1 $(MAKE) all;
|
|
|
|
$(MAKE) clean && OPT=-DROCKSDB_LITE $(MAKE) release;
|
|
|
|
|
2013-08-14 20:29:05 +00:00
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
# Platform-specific compilation
|
|
|
|
# ---------------------------------------------------------------------------
|
2013-06-05 17:37:38 +00:00
|
|
|
|
2011-05-28 00:53:58 +00:00
|
|
|
ifeq ($(PLATFORM), IOS)
|
|
|
|
# For iOS, create universal object files to be used on both the simulator and
|
|
|
|
# a device.
|
2012-08-27 06:45:35 +00:00
|
|
|
PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
|
|
|
|
SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
|
|
|
|
DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
|
2014-04-04 20:11:44 +00:00
|
|
|
IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
|
2011-06-29 22:53:17 +00:00
|
|
|
|
2011-05-28 00:53:58 +00:00
|
|
|
.cc.o:
|
|
|
|
mkdir -p ios-x86/$(dir $@)
|
2014-04-04 20:11:44 +00:00
|
|
|
$(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
|
2011-05-28 00:53:58 +00:00
|
|
|
mkdir -p ios-arm/$(dir $@)
|
2014-04-04 20:11:44 +00:00
|
|
|
xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
|
2011-05-28 00:53:58 +00:00
|
|
|
lipo ios-x86/$@ ios-arm/$@ -create -output $@
|
2011-08-05 20:40:49 +00:00
|
|
|
|
|
|
|
.c.o:
|
|
|
|
mkdir -p ios-x86/$(dir $@)
|
2014-04-04 20:11:44 +00:00
|
|
|
$(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
|
2011-08-05 20:40:49 +00:00
|
|
|
mkdir -p ios-arm/$(dir $@)
|
2014-04-04 20:11:44 +00:00
|
|
|
xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
|
2011-08-05 20:40:49 +00:00
|
|
|
lipo ios-x86/$@ ios-arm/$@ -create -output $@
|
|
|
|
|
2011-05-28 00:53:58 +00:00
|
|
|
else
|
2011-03-18 22:37:00 +00:00
|
|
|
.cc.o:
|
build: make "make" output readable by default
Summary:
With this change, make now prints a summary line for each
compiler and linker invocation, e.g.,:
CC db/builder.o
CC db/c.o
CC db/column_family.o
To see full commands, insert "V=1" into your make command.
E.g., run "make V=1 all" if you want it to print each command
in its full glory.
$^ is GNU make's abbreviation for the prerequisites of the current target.
These AM_V_... variables expand to some very short string like "CC" or
"LD", by default, so that the output of "make" is readable. If/when you
want more details, just build with "make V=1 ...", and make will print
each full command as it is executed. If you prefer to see the noise
all the time, and only want to optionally see the abbreviated output,
set AM_DEFAULT_VERBOSITY=1 in your environment, and then build with
V=0 to see the abbreviated command indicators.
Test Plan:
invoke make a few different ways and observe:
make clean; make # abbreviated
make clean; make V=0 # also abbreviated
make clean; make V=1 # full detail
Reviewers: sdong, ljin, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D33579
2015-02-18 19:42:54 +00:00
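For readers unfamiliar with the AM_V_... convention described above, here is a minimal sketch of the usual automake-style "silent rules" machinery; only AM_V_CC, V, and AM_DEFAULT_VERBOSITY are taken from the commit message, and the exact definitions in this Makefile may differ:

```
# Sketch of the "silent rules" pattern (assumed, not the actual definitions).
# V defaults to AM_DEFAULT_VERBOSITY; "make V=1" prints the full commands,
# otherwise each compile is summarized as "  CC      <target>".
AM_DEFAULT_VERBOSITY ?= 0
V ?= $(AM_DEFAULT_VERBOSITY)
ifeq ($(V),1)
  AM_V_CC =
else
  AM_V_CC = @echo "  CC      " $@;
endif
```

Prefixing a recipe line with $(AM_V_CC), as the .cc.o rule below does, therefore either leaves the command untouched (V=1) or silences it and prints the short summary line instead.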
|
|
|
$(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS)
|
2011-08-05 20:40:49 +00:00
|
|
|
|
|
|
|
.c.o:
|
build: make "make" output readable by default
Summary:
With this change, make now prints a summary line for each
compiler and linker invocation, e.g.,:
CC db/builder.o
CC db/c.o
CC db/column_family.o
To see full commands, insert "V=1" into your make command.
E.g., run "make V=1 all" if you want it to print each command
in its full glory.
$^ is GNU make's abbreviation for the prerequisites of the current target.
These AM_V_... variables expand to some very short string like "CC" or
"LD", by default, so that the output of "make" is readable. If/when you
want more details, just build with "make V=1 ...", and make will print
each full command as it is executed. If you prefer to see the noise
all the time, and only want to optionally see the abbreviated output,
set AM_DEFAULT_VERBOSITY=1 in your environment, and then build with
V=0 to see the abbreviated command indicators.
Test Plan:
invoke make a few different ways and observe:
make clean; make # abbreviated
make clean; make V=0 # also abbreviated
make clean; make V=1 # full detail
Reviewers: sdong, ljin, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D33579
2015-02-18 19:42:54 +00:00
|
|
|
$(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
|
2011-05-28 00:53:58 +00:00
|
|
|
endif
|
2013-01-14 20:39:24 +00:00
|
|
|
|
2013-08-14 20:29:05 +00:00
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
# Source files dependencies detection
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
# Add proper dependency support so changing a .h file forces a .cc file to
|
|
|
|
# rebuild.
|
|
|
|
|
|
|
|
# The .d file records a .cc file's dependencies on .h files. We generate such
|
|
|
|
# dependencies with g++'s -MM option, whose output is a make dependency rule.
|
2013-01-14 20:39:24 +00:00
|
|
|
%.d: %.cc
|
build: make "make" output readable by default
Summary:
With this change, make now prints a summary line for each
compiler and linker invocation, e.g.,:
CC db/builder.o
CC db/c.o
CC db/column_family.o
To see full commands, insert "V=1" into your make command.
E.g., run "make V=1 all" if you want it to print each command
in its full glory.
$^ is GNU make's abbreviation for the prerequisites of the current target.
These AM_V_... variables expand to some very short string like "CC" or
"LD", by default, so that the output of "make" is readable. If/when you
want more details, just build with "make V=1 ...", and make will print
each full command as it is executed. If you prefer to see the noise
all the time, and only want to optionally see the abbreviated output,
set AM_DEFAULT_VERBOSITY=1 in your environment, and then build with
V=0 to see the abbreviated command indicators.
Test Plan:
invoke make a few different ways and observe:
make clean; make # abbreviated
make clean; make V=0 # also abbreviated
make clean; make V=1 # full detail
Reviewers: sdong, ljin, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D33579
2015-02-18 19:42:54 +00:00
|
|
|
@$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \
|
2015-02-13 19:40:23 +00:00
|
|
|
-MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@'
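To make the effect of this rule concrete, here is roughly what one generated .d file looks like; the file and header names are purely illustrative, not actual output:

```
# Hypothetical contents of db/builder.d produced by the rule above
# (header names are made up for illustration):
db/builder.d db/builder.o: db/builder.cc db/builder.h \
  include/rocksdb/options.h include/rocksdb/status.h
```

Because both the .d and the .o are named as targets via -MT, touching any listed header forces make to rebuild db/builder.o and to regenerate db/builder.d itself.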
|
2013-01-14 20:39:24 +00:00
|
|
|
|
2015-02-21 01:42:16 +00:00
|
|
|
all_sources = $(LIB_SOURCES) $(TEST_BENCH_SOURCES) $(MOCK_SOURCES)
|
|
|
|
DEPFILES = $(all_sources:.cc=.d)
|
2013-01-14 20:39:24 +00:00
|
|
|
|
|
|
|
depend: $(DEPFILES)
|
|
|
|
|
2014-01-14 08:39:42 +00:00
|
|
|
# if the make goal is "clean", "format", "jclean", "jtest", "package", or
|
|
|
|
# "analyze", we shouldn't try to import the *.d files.
|
|
|
|
# TODO(kailiu) Unfamiliarity with Make's conditionals led to this ugly but
|
|
|
|
# working solution.
|
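As a side note on that TODO, GNU make's $(filter ...) function is a common way to flatten such a chain. The sketch below is only an alternative formulation under that assumption (NO_DEP_GOALS is a made-up name), not what this Makefile currently does:

```
# Hypothetical alternative to the nested ifneq chain that follows:
# skip dependency inclusion whenever any "no-dep" goal is requested.
NO_DEP_GOALS = clean format jclean jtest package analyze
ifeq ($(filter $(NO_DEP_GOALS),$(MAKECMDGOALS)),)
-include $(DEPFILES)
endif
```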
2013-01-14 20:39:24 +00:00
|
|
|
ifneq ($(MAKECMDGOALS),clean)
|
2014-01-14 08:39:42 +00:00
|
|
|
ifneq ($(MAKECMDGOALS),format)
|
2014-03-28 21:19:21 +00:00
|
|
|
ifneq ($(MAKECMDGOALS),jclean)
|
|
|
|
ifneq ($(MAKECMDGOALS),jtest)
|
Package generation for Ubuntu and CentOS
Summary:
I put together a script to assist in the generation of debs and
rpms. I've tested that this works on Ubuntu via Vagrant. I've included the
Vagrantfile here, but I can remove it if it's not useful. The package.sh
script should work on any Ubuntu or CentOS machine; I just added a bit of
logic in there to allow a base Ubuntu or CentOS machine to build
RocksDB from scratch.
Example output on Ubuntu 14.04:
```
root@vagrant-ubuntu-trusty-64:/vagrant# ./tools/package.sh
[+] g++-4.7 is already installed. skipping.
[+] libgflags-dev is already installed. skipping.
[+] ruby-all-dev is already installed. skipping.
[+] fpm is already installed. skipping.
Created package {:path=>"rocksdb_3.5_amd64.deb"}
root@vagrant-ubuntu-trusty-64:/vagrant# dpkg --info rocksdb_3.5_amd64.deb
new debian package, version 2.0.
size 17392022 bytes: control archive=1518 bytes.
275 bytes, 11 lines control
2911 bytes, 38 lines md5sums
Package: rocksdb
Version: 3.5
License: BSD
Vendor: Facebook
Architecture: amd64
Maintainer: rocksdb@fb.com
Installed-Size: 83358
Section: default
Priority: extra
Homepage: http://rocksdb.org/
Description: RocksDB is an embeddable persistent key-value store for fast storage.
```
Example output on CentOS 6.5:
```
[root@localhost vagrant]# rpm -qip rocksdb-3.5-1.x86_64.rpm
Name : rocksdb Relocations: /usr
Version : 3.5 Vendor: Facebook
Release : 1 Build Date: Mon 29 Sep 2014 01:26:11 AM UTC
Install Date: (not installed) Build Host: localhost
Group : default Source RPM: rocksdb-3.5-1.src.rpm
Size : 96231106 License: BSD
Signature : (none)
Packager : rocksdb@fb.com
URL : http://rocksdb.org/
Summary : RocksDB is an embeddable persistent key-value store for fast storage.
Description :
RocksDB is an embeddable persistent key-value store for fast storage.
```
Test Plan:
How this gets used is really up to the RocksDB core team. If you
want to actually get this into mainline, you might have to change `make
install` such that it installs the RocksDB shared object file as well, which
would require you to link against gflags (maybe?) and that would require some
potential modifications to the script here (basically add a dependency on that
package).
Currently, this will install the headers and a pre-compiled statically linked
object file. If that's what you want out of life, then this requires no
modifications.
Reviewers: ljin, yhchiang, igor
Reviewed By: igor
Differential Revision: https://reviews.facebook.net/D24141
2014-09-29 23:09:46 +00:00
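Since the conditional below special-cases a "package" goal, here is a minimal sketch of how such a goal could simply delegate to the script described above; this is an illustration only, not the Makefile's actual rule, and it assumes tools/package.sh is run from the repository root with no arguments, as in the Ubuntu example:

```
# Hypothetical wiring for the "package" goal (illustrative only).
# Recipe lines must be indented with a tab.
package:
	sh tools/package.sh
```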
|
|
|
ifneq ($(MAKECMDGOALS),package)
|
2015-02-04 05:43:06 +00:00
|
|
|
ifneq ($(MAKECMDGOALS),analyze)
|
2013-01-14 20:39:24 +00:00
|
|
|
-include $(DEPFILES)
|
|
|
|
endif
|
2014-01-14 08:39:42 +00:00
|
|
|
endif
|
2014-03-28 21:19:21 +00:00
|
|
|
endif
|
|
|
|
endif
|
2014-09-29 23:09:46 +00:00
|
|
|
endif
|
2015-02-04 05:43:06 +00:00
|
|
|
endif
|