mirror of https://github.com/facebook/rocksdb.git
Migrate to docker for CI run (#10496)
Summary: Moved linux builds to using docker to avoid CI instability caused by dependency installation site down. Added the `Dockerfile` which is used to build the image. The build time is also significantly reduced, because no dependencies installation and with using 2xlarge+ instance for slow build (like tsan test). Also fixed a few issues detected while building this: * `DestoryDB()` Status not checked for a few tests * nullptr might be used in `inlineskiplist.cc` Pull Request resolved: https://github.com/facebook/rocksdb/pull/10496 Test Plan: CI Reviewed By: ajkr Differential Revision: D38554200 Pulled By: jay-zhuang fbshipit-source-id: 16e8fb2bf07b9c84bb27fb18421c4d54f2f248fd
This commit is contained in:
parent
a0798f6f92
commit
5d3aefb682
|
@ -3,11 +3,6 @@ version: 2.1
|
||||||
orbs:
|
orbs:
|
||||||
win: circleci/windows@2.4.0
|
win: circleci/windows@2.4.0
|
||||||
|
|
||||||
aliases:
|
|
||||||
- &notify-on-main-failure
|
|
||||||
fail_only: true
|
|
||||||
only_for_branches: main
|
|
||||||
|
|
||||||
commands:
|
commands:
|
||||||
install-cmake-on-macos:
|
install-cmake-on-macos:
|
||||||
steps:
|
steps:
|
||||||
|
@ -68,28 +63,14 @@ commands:
|
||||||
path: t.tar.gz
|
path: t.tar.gz
|
||||||
destination: test_logs
|
destination: test_logs
|
||||||
when: on_fail
|
when: on_fail
|
||||||
|
- run: # store core dumps if there's any
|
||||||
install-clang-10:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install Clang 10
|
|
||||||
command: |
|
command: |
|
||||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
mkdir -p /tmp/core_dumps
|
||||||
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
|
cp core.* /tmp/core_dumps
|
||||||
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
|
when: on_fail
|
||||||
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
|
- store_artifacts:
|
||||||
sudo apt-get update -y && sudo apt-get install -y clang-10
|
path: /tmp/core_dumps
|
||||||
|
when: on_fail
|
||||||
install-clang-13:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install Clang 13
|
|
||||||
command: |
|
|
||||||
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
|
|
||||||
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
|
|
||||||
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
|
|
||||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
|
|
||||||
sudo apt-get update -y && sudo apt-get install -y clang-13
|
|
||||||
|
|
||||||
install-gflags:
|
install-gflags:
|
||||||
steps:
|
steps:
|
||||||
|
@ -98,33 +79,6 @@ commands:
|
||||||
command: |
|
command: |
|
||||||
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
||||||
|
|
||||||
install-benchmark:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install ninja build
|
|
||||||
command: sudo apt-get update -y && sudo apt-get install -y ninja-build
|
|
||||||
- run:
|
|
||||||
name: Install benchmark
|
|
||||||
command: |
|
|
||||||
git clone --depth 1 --branch v1.6.1 https://github.com/google/benchmark.git ~/benchmark
|
|
||||||
cd ~/benchmark && mkdir build && cd build
|
|
||||||
cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0
|
|
||||||
ninja && sudo ninja install
|
|
||||||
|
|
||||||
install-valgrind:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install valgrind
|
|
||||||
command: sudo apt-get update -y && sudo apt-get install -y valgrind
|
|
||||||
|
|
||||||
upgrade-cmake:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Upgrade cmake
|
|
||||||
command: |
|
|
||||||
sudo apt remove --purge cmake
|
|
||||||
sudo snap install cmake --classic
|
|
||||||
|
|
||||||
install-gflags-on-macos:
|
install-gflags-on-macos:
|
||||||
steps:
|
steps:
|
||||||
- run:
|
- run:
|
||||||
|
@ -132,48 +86,8 @@ commands:
|
||||||
command: |
|
command: |
|
||||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
|
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
|
||||||
|
|
||||||
install-gtest-parallel:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install gtest-parallel
|
|
||||||
command: |
|
|
||||||
git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel
|
|
||||||
echo 'export PATH=$HOME/gtest-parallel:$PATH' >> $BASH_ENV
|
|
||||||
|
|
||||||
install-compression-libs:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install compression libs
|
|
||||||
command: |
|
|
||||||
sudo apt-get update -y && sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
|
|
||||||
|
|
||||||
install-streaming-compress-libs:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install streaming compression libs
|
|
||||||
command: |
|
|
||||||
sudo apt-get update -y && sudo apt-get install -y libzstd-dev
|
|
||||||
|
|
||||||
install-libprotobuf-mutator:
|
|
||||||
steps:
|
|
||||||
- run:
|
|
||||||
name: Install libprotobuf-mutator libs
|
|
||||||
command: |
|
|
||||||
git clone -b v1.0 git@github.com:google/libprotobuf-mutator.git ~/libprotobuf-mutator
|
|
||||||
cd ~/libprotobuf-mutator && git checkout ffd86a32874e5c08a143019aad1aaf0907294c9f && mkdir build && cd build
|
|
||||||
cmake .. -GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON
|
|
||||||
ninja && sudo ninja install
|
|
||||||
- run:
|
|
||||||
name: Setup environment variables
|
|
||||||
command: |
|
|
||||||
echo "export PKG_CONFIG_PATH=/usr/local/OFF/:~/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/" >> $BASH_ENV
|
|
||||||
echo "export PROTOC_BIN=~/libprotobuf-mutator/build/external.protobuf/bin/protoc" >> $BASH_ENV
|
|
||||||
setup-folly:
|
setup-folly:
|
||||||
steps:
|
steps:
|
||||||
- run:
|
|
||||||
name: Install folly dependencies
|
|
||||||
command: |
|
|
||||||
sudo apt-get install libgoogle-glog-dev
|
|
||||||
- run:
|
- run:
|
||||||
name: Checkout folly sources
|
name: Checkout folly sources
|
||||||
command: |
|
command: |
|
||||||
|
@ -221,6 +135,21 @@ executors:
|
||||||
image: 'windows-server-2019-vs2019:stable'
|
image: 'windows-server-2019-vs2019:stable'
|
||||||
resource_class: windows.2xlarge
|
resource_class: windows.2xlarge
|
||||||
shell: bash.exe
|
shell: bash.exe
|
||||||
|
linux-docker:
|
||||||
|
docker:
|
||||||
|
# The image configuration is build_tools/ubuntu20_image/Dockerfile
|
||||||
|
# To update and build the image:
|
||||||
|
# $ cd build_tools/ubuntu20_image
|
||||||
|
# $ docker build -t zjay437/rocksdb:0.5 .
|
||||||
|
# $ docker push zjay437/rocksdb:0.5
|
||||||
|
# `zjay437` is the account name for zjay@meta.com which readwrite token is shared internally. To login:
|
||||||
|
# $ docker login --username zjay437
|
||||||
|
# Or please feel free to change it to your docker hub account for hosting the image, meta employee should already have the account and able to login with SSO.
|
||||||
|
# To avoid impacting the existing CI runs, please bump the version every time creating a new image
|
||||||
|
# to run the CI image environment locally:
|
||||||
|
# $ docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it zjay437/rocksdb:0.5 bash
|
||||||
|
# option `--cap-add=SYS_PTRACE --security-opt seccomp=unconfined` is used to enable gdb to attach an existing process
|
||||||
|
- image: zjay437/rocksdb:0.5
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-macos:
|
build-macos:
|
||||||
|
@ -272,172 +201,132 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux:
|
build-linux:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-streaming-compress-libs
|
|
||||||
- run: make V=1 J=32 -j32 check
|
- run: make V=1 J=32 -j32 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-encrypted_env-no_compression:
|
build-linux-encrypted_env-no_compression:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
|
- run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
|
||||||
- run: |
|
- run: |
|
||||||
./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
|
./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-shared_lib-alt_namespace-status_checked:
|
build-linux-shared_lib-alt_namespace-status_checked:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check
|
- run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-release:
|
build-linux-release:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- checkout # check out the code in the project directory
|
- checkout # check out the code in the project directory
|
||||||
- run: make V=1 -j32 release
|
- run: make V=1 -j32 release
|
||||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
|
||||||
- install-gflags
|
|
||||||
- run: make V=1 -j32 release
|
|
||||||
- run: ./db_stress --version # ensure with gflags
|
- run: ./db_stress --version # ensure with gflags
|
||||||
|
- run: make clean
|
||||||
|
- run: apt-get remove -y libgflags-dev
|
||||||
|
- run: make V=1 -j32 release
|
||||||
|
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-release-rtti:
|
build-linux-release-rtti:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: xlarge
|
resource_class: xlarge
|
||||||
steps:
|
steps:
|
||||||
- checkout # check out the code in the project directory
|
- checkout # check out the code in the project directory
|
||||||
- run: make clean
|
|
||||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
|
||||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
|
||||||
- run: sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
|
||||||
- run: make clean
|
|
||||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||||
- run: ./db_stress --version # ensure with gflags
|
- run: ./db_stress --version # ensure with gflags
|
||||||
|
- run: make clean
|
||||||
|
- run: apt-get remove -y libgflags-dev
|
||||||
|
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||||
|
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||||
|
|
||||||
build-linux-lite:
|
build-linux-lite:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run: LITE=1 make V=1 J=8 -j8 check
|
- run: LITE=1 make V=1 J=8 -j8 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-lite-release:
|
build-linux-lite-release:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- checkout # check out the code in the project directory
|
- checkout # check out the code in the project directory
|
||||||
- run: LITE=1 make V=1 -j8 release
|
- run: LITE=1 make V=1 -j8 release
|
||||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
|
||||||
- install-gflags
|
|
||||||
- run: LITE=1 make V=1 -j8 release
|
|
||||||
- run: ./db_stress --version # ensure with gflags
|
- run: ./db_stress --version # ensure with gflags
|
||||||
|
- run: make clean
|
||||||
|
- run: apt-get remove -y libgflags-dev
|
||||||
|
- run: LITE=1 make V=1 -j8 release
|
||||||
|
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang-no_test_run:
|
build-linux-clang-no_test_run:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: xlarge
|
resource_class: xlarge
|
||||||
steps:
|
steps:
|
||||||
- checkout # check out the code in the project directory
|
- checkout # check out the code in the project directory
|
||||||
- run: sudo apt-get update -y && sudo apt-get install -y clang libgflags-dev libtbb-dev
|
|
||||||
- run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
|
- run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang10-asan:
|
build-linux-clang10-asan:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-clang-10
|
|
||||||
- run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
|
- run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang10-mini-tsan:
|
build-linux-clang10-mini-tsan:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
resource_class: 2xlarge+
|
||||||
resource_class: 2xlarge
|
|
||||||
# find test list by `make list_all_tests`
|
|
||||||
parameters:
|
|
||||||
start_test:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
end_test:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
- run: COMPILE_WITH_TSAN=1 CC=clang-13 CXX=clang++-13 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check
|
||||||
- install-clang-10
|
|
||||||
- install-gtest-parallel
|
|
||||||
- run:
|
|
||||||
name: "Build unit tests"
|
|
||||||
command: |
|
|
||||||
echo "env: $(env)"
|
|
||||||
ROCKSDBTESTS_START=<<parameters.start_test>> ROCKSDBTESTS_END=<<parameters.end_test>> ROCKSDBTESTS_SUBSET_TESTS_TO_FILE=/tmp/test_list COMPILE_WITH_TSAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 --output-sync=target build_subset_tests
|
|
||||||
- run:
|
|
||||||
name: "Run unit tests in parallel"
|
|
||||||
command: |
|
|
||||||
sed -i 's/[[:space:]]*$//; s/ / \.\//g; s/.*/.\/&/' /tmp/test_list
|
|
||||||
cat /tmp/test_list
|
|
||||||
gtest-parallel $(</tmp/test_list) --output_dir=/tmp | cat # pipe to cat to continuously output status on circleci UI. Otherwise, no status will be printed while the job is running.
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang10-ubsan:
|
build-linux-clang10-ubsan:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-clang-10
|
|
||||||
- run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
|
- run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-valgrind:
|
build-linux-valgrind:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-valgrind
|
|
||||||
- run: PORTABLE=1 make V=1 -j32 valgrind_test
|
- run: PORTABLE=1 make V=1 -j32 valgrind_test
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang10-clang-analyze:
|
build-linux-clang10-clang-analyze:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-clang-10
|
|
||||||
- run: sudo apt-get update -y && sudo apt-get install -y clang-tools-10
|
|
||||||
- run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for reason we haven't figured out. For unknown, reason passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
|
- run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for reason we haven't figured out. For unknown, reason passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
|
||||||
- post-steps
|
- post-steps
|
||||||
|
- run:
|
||||||
|
name: "compress test report"
|
||||||
|
command: tar -cvzf scan_build_report.tar.gz scan_build_report
|
||||||
|
when: on_fail
|
||||||
|
- store_artifacts:
|
||||||
|
path: scan_build_report.tar.gz
|
||||||
|
destination: scan_build_report
|
||||||
|
when: on_fail
|
||||||
|
|
||||||
build-linux-runner:
|
build-linux-runner:
|
||||||
machine: true
|
machine: true
|
||||||
|
@ -452,26 +341,20 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-cmake-with-folly:
|
build-linux-cmake-with-folly:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- upgrade-cmake
|
|
||||||
- setup-folly
|
- setup-folly
|
||||||
- run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
|
- run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-cmake-with-benchmark:
|
build-linux-cmake-with-benchmark:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
- run: mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20
|
||||||
- install-benchmark
|
|
||||||
- run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20)
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-unity-and-headers:
|
build-linux-unity-and-headers:
|
||||||
|
@ -488,101 +371,78 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-gcc-7-with-folly:
|
build-linux-gcc-7-with-folly:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-7 g++-7 libgflags-dev
|
|
||||||
- setup-folly
|
- setup-folly
|
||||||
- run: USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
|
- run: USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-gcc-8-no_test_run:
|
build-linux-gcc-8-no_test_run:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
resource_class: 2xlarge
|
||||||
resource_class: xlarge
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-8 g++-8 libgflags-dev
|
- run: CC=gcc-8 CXX=g++-8 V=1 make -j32 all
|
||||||
- run: CC=gcc-8 CXX=g++-8 V=1 make -j16 all
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-gcc-10-cxx20-no_test_run:
|
build-linux-gcc-10-cxx20-no_test_run:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
resource_class: 2xlarge
|
||||||
resource_class: xlarge
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- run: sudo apt-get update -y && sudo apt-get install gcc-10 g++-10 libgflags-dev
|
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j32 all
|
||||||
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j16 all
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-gcc-11-no_test_run:
|
build-linux-gcc-11-no_test_run:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
resource_class: 2xlarge
|
||||||
resource_class: xlarge
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-11 g++-11 libgflags-dev
|
- run: CC=gcc-11 CXX=g++-11 V=1 make -j32 all microbench
|
||||||
- install-benchmark
|
|
||||||
- run: CC=gcc-11 CXX=g++-11 V=1 make -j16 all microbench
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-clang-13-no_test_run:
|
build-linux-clang-13-no_test_run:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
resource_class: 2xlarge
|
||||||
resource_class: xlarge
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-clang-13
|
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j32 all microbench
|
||||||
- install-benchmark
|
|
||||||
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j16 all microbench
|
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
|
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
|
||||||
build-linux-clang-13-asan-ubsan-with-folly:
|
build-linux-clang-13-asan-ubsan-with-folly:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-clang-13
|
|
||||||
- install-gflags
|
|
||||||
- setup-folly
|
- setup-folly
|
||||||
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
|
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
# This job is only to make sure the microbench tests are able to run, the benchmark result is not meaningful as the CI host is changing.
|
# This job is only to make sure the microbench tests are able to run, the benchmark result is not meaningful as the CI host is changing.
|
||||||
build-linux-run-microbench:
|
build-linux-run-microbench:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-benchmark
|
|
||||||
- run: DEBUG_LEVEL=0 make -j32 run_microbench
|
- run: DEBUG_LEVEL=0 make -j32 run_microbench
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-mini-crashtest:
|
build-linux-mini-crashtest:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-compression-libs
|
|
||||||
- run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
|
- run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-crashtest-tiered-storage-bb:
|
build-linux-crashtest-tiered-storage-bb:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-compression-libs
|
|
||||||
- run:
|
- run:
|
||||||
name: "run crashtest"
|
name: "run crashtest"
|
||||||
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 blackbox_crash_test_with_tiered_storage
|
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 blackbox_crash_test_with_tiered_storage
|
||||||
|
@ -590,13 +450,10 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-crashtest-tiered-storage-wb:
|
build-linux-crashtest-tiered-storage-wb:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-compression-libs
|
|
||||||
- run:
|
- run:
|
||||||
name: "run crashtest"
|
name: "run crashtest"
|
||||||
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 whitebox_crash_test_with_tiered_storage
|
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 whitebox_crash_test_with_tiered_storage
|
||||||
|
@ -672,14 +529,10 @@ jobs:
|
||||||
build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
|
build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
|
||||||
|
|
||||||
build-linux-java:
|
build-linux-java:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
environment:
|
|
||||||
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run:
|
- run:
|
||||||
name: "Set Java Environment"
|
name: "Set Java Environment"
|
||||||
command: |
|
command: |
|
||||||
|
@ -693,14 +546,10 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-java-static:
|
build-linux-java-static:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
environment:
|
|
||||||
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
|
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run:
|
- run:
|
||||||
name: "Set Java Environment"
|
name: "Set Java Environment"
|
||||||
command: |
|
command: |
|
||||||
|
@ -786,34 +635,25 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-examples:
|
build-examples:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run:
|
- run:
|
||||||
name: "Build examples"
|
name: "Build examples"
|
||||||
command: |
|
command: |
|
||||||
OPT=-DTRAVIS V=1 make -j4 static_lib && cd examples && make -j4
|
make V=1 -j4 static_lib && cd examples && make V=1 -j4
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-cmake-mingw:
|
build-cmake-mingw:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
- run: update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
|
||||||
- run: sudo apt-get update -y && sudo apt-get install -y mingw-w64
|
|
||||||
- run: sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
|
|
||||||
- run:
|
- run:
|
||||||
name: "Build cmake-mingw"
|
name: "Build cmake-mingw"
|
||||||
command: |
|
command: |
|
||||||
sudo apt-get install snapd && sudo snap install cmake --beta --classic
|
|
||||||
export PATH=/snap/bin:$PATH
|
|
||||||
sudo apt-get install -y openjdk-8-jdk
|
|
||||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
|
|
||||||
export PATH=$JAVA_HOME/bin:$PATH
|
export PATH=$JAVA_HOME/bin:$PATH
|
||||||
echo "JAVA_HOME=${JAVA_HOME}"
|
echo "JAVA_HOME=${JAVA_HOME}"
|
||||||
which java && java -version
|
which java && java -version
|
||||||
|
@ -822,14 +662,12 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-linux-non-shm:
|
build-linux-non-shm:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
environment:
|
environment:
|
||||||
TEST_TMPDIR: /tmp/rocksdb_test_tmp
|
TEST_TMPDIR: /tmp/rocksdb_test_tmp
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- run: make V=1 -j32 check
|
- run: make V=1 -j32 check
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
|
@ -887,13 +725,10 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-format-compatible:
|
build-format-compatible:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-gflags
|
|
||||||
- install-compression-libs
|
|
||||||
- run:
|
- run:
|
||||||
name: "test"
|
name: "test"
|
||||||
command: |
|
command: |
|
||||||
|
@ -904,14 +739,10 @@ jobs:
|
||||||
- post-steps
|
- post-steps
|
||||||
|
|
||||||
build-fuzzers:
|
build-fuzzers:
|
||||||
machine:
|
executor: linux-docker
|
||||||
image: ubuntu-2004:202111-02
|
|
||||||
resource_class: large
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- pre-steps
|
- pre-steps
|
||||||
- install-clang-13
|
|
||||||
- run: sudo apt-get update -y && sudo apt-get install -y cmake ninja-build binutils liblzma-dev libz-dev pkg-config autoconf libtool
|
|
||||||
- install-libprotobuf-mutator
|
|
||||||
- run:
|
- run:
|
||||||
name: "Build rocksdb lib"
|
name: "Build rocksdb lib"
|
||||||
command: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
|
command: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
|
||||||
|
@ -942,12 +773,7 @@ workflows:
|
||||||
jobs:
|
jobs:
|
||||||
- build-linux-clang10-asan
|
- build-linux-clang10-asan
|
||||||
- build-linux-clang10-ubsan
|
- build-linux-clang10-ubsan
|
||||||
- build-linux-clang10-mini-tsan:
|
- build-linux-clang10-mini-tsan
|
||||||
start_test: ""
|
|
||||||
end_test: "env_test"
|
|
||||||
- build-linux-clang10-mini-tsan:
|
|
||||||
start_test: "env_test"
|
|
||||||
end_test: ""
|
|
||||||
- build-linux-shared_lib-alt_namespace-status_checked
|
- build-linux-shared_lib-alt_namespace-status_checked
|
||||||
jobs-linux-no-test-run:
|
jobs-linux-no-test-run:
|
||||||
jobs:
|
jobs:
|
||||||
|
|
|
@ -0,0 +1,56 @@
|
||||||
|
# from official ubuntu 20.04
|
||||||
|
FROM ubuntu:20.04
|
||||||
|
# update system
|
||||||
|
RUN apt-get update && apt-get upgrade -y
|
||||||
|
# install basic tools
|
||||||
|
RUN apt-get install -y vim wget curl
|
||||||
|
# install tzdata noninteractive
|
||||||
|
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
|
||||||
|
# install git and default compilers
|
||||||
|
RUN apt-get install -y git gcc g++ clang clang-tools
|
||||||
|
# install basic package
|
||||||
|
RUN apt-get install -y lsb-release software-properties-common gnupg
|
||||||
|
# install gflags, tbb
|
||||||
|
RUN apt-get install -y libgflags-dev libtbb-dev
|
||||||
|
# install compression libs
|
||||||
|
RUN apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
|
||||||
|
# install cmake
|
||||||
|
RUN apt-get install -y cmake
|
||||||
|
# install clang-13
|
||||||
|
WORKDIR /root
|
||||||
|
RUN wget https://apt.llvm.org/llvm.sh
|
||||||
|
RUN chmod +x llvm.sh
|
||||||
|
RUN ./llvm.sh 13 all
|
||||||
|
# install gcc-7, 8, 10, 11, default is 9
|
||||||
|
RUN apt-get install -y gcc-7 g++-7
|
||||||
|
RUN apt-get install -y gcc-8 g++-8
|
||||||
|
RUN apt-get install -y gcc-10 g++-10
|
||||||
|
RUN add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||||
|
RUN apt-get install -y gcc-11 g++-11
|
||||||
|
# install apt-get install -y valgrind
|
||||||
|
RUN apt-get install -y valgrind
|
||||||
|
# install folly depencencies
|
||||||
|
RUN apt-get install -y libgoogle-glog-dev
|
||||||
|
# install openjdk 8
|
||||||
|
RUN apt-get install -y openjdk-8-jdk
|
||||||
|
ENV JAVA_HOME /usr/lib/jvm/java-1.8.0-openjdk-amd64
|
||||||
|
# install mingw
|
||||||
|
RUN apt-get install -y mingw-w64
|
||||||
|
|
||||||
|
# install gtest-parallel package
|
||||||
|
RUN git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel
|
||||||
|
ENV PATH $PATH:/root/gtest-parallel
|
||||||
|
|
||||||
|
# install libprotobuf for fuzzers test
|
||||||
|
RUN apt-get install -y ninja-build binutils liblzma-dev libz-dev pkg-config autoconf libtool
|
||||||
|
RUN git clone --branch v1.0 https://github.com/google/libprotobuf-mutator.git ~/libprotobuf-mutator && cd ~/libprotobuf-mutator && git checkout ffd86a32874e5c08a143019aad1aaf0907294c9f && mkdir build && cd build && cmake .. -GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON && ninja && ninja install
|
||||||
|
ENV PKG_CONFIG_PATH /usr/local/OFF/:/root/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/
|
||||||
|
ENV PROTOC_BIN /root/libprotobuf-mutator/build/external.protobuf/bin/protoc
|
||||||
|
|
||||||
|
# install the latest google benchmark
|
||||||
|
RUN git clone --depth 1 --branch v1.7.0 https://github.com/google/benchmark.git ~/benchmark
|
||||||
|
RUN cd ~/benchmark && mkdir build && cd build && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0 && ninja && ninja install
|
||||||
|
|
||||||
|
# clean up
|
||||||
|
RUN rm -rf /var/lib/apt/lists/*
|
||||||
|
RUN rm -rf /root/benchmark
|
|
@ -2939,7 +2939,7 @@ TEST_P(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) {
|
||||||
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
|
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_P(ColumnFamilyTest, CreateAndDestoryOptions) {
|
TEST_P(ColumnFamilyTest, CreateAndDestroyOptions) {
|
||||||
std::unique_ptr<ColumnFamilyOptions> cfo(new ColumnFamilyOptions());
|
std::unique_ptr<ColumnFamilyOptions> cfo(new ColumnFamilyOptions());
|
||||||
ColumnFamilyHandle* cfh;
|
ColumnFamilyHandle* cfh;
|
||||||
Open();
|
Open();
|
||||||
|
|
|
@ -77,7 +77,7 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
|
||||||
options.compression = kNoCompression;
|
options.compression = kNoCompression;
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
assert(s.ok());
|
assert(s.ok());
|
||||||
assert(db);
|
assert(db);
|
||||||
|
@ -128,7 +128,7 @@ TEST_F(CompactFilesTest, MultipleLevel) {
|
||||||
options.listeners.emplace_back(collector);
|
options.listeners.emplace_back(collector);
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
ASSERT_OK(s);
|
ASSERT_OK(s);
|
||||||
ASSERT_NE(db, nullptr);
|
ASSERT_NE(db, nullptr);
|
||||||
|
@ -211,7 +211,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
|
||||||
options.listeners.emplace_back(collector);
|
options.listeners.emplace_back(collector);
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
ASSERT_OK(s);
|
ASSERT_OK(s);
|
||||||
ASSERT_NE(db, nullptr);
|
ASSERT_NE(db, nullptr);
|
||||||
|
@ -250,7 +250,7 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {
|
||||||
options.listeners.emplace_back(collector);
|
options.listeners.emplace_back(collector);
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
assert(s.ok());
|
assert(s.ok());
|
||||||
assert(db);
|
assert(db);
|
||||||
|
@ -288,7 +288,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
|
||||||
options.listeners.emplace_back(collector);
|
options.listeners.emplace_back(collector);
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
ASSERT_OK(s);
|
ASSERT_OK(s);
|
||||||
assert(db);
|
assert(db);
|
||||||
|
@ -366,7 +366,7 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
|
||||||
options.compaction_filter = cf.get();
|
options.compaction_filter = cf.get();
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
ASSERT_OK(s);
|
ASSERT_OK(s);
|
||||||
|
|
||||||
|
@ -404,7 +404,7 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
|
||||||
{CompactionStyle::kCompactionStyleLevel,
|
{CompactionStyle::kCompactionStyleLevel,
|
||||||
CompactionStyle::kCompactionStyleUniversal,
|
CompactionStyle::kCompactionStyleUniversal,
|
||||||
CompactionStyle::kCompactionStyleNone}) {
|
CompactionStyle::kCompactionStyleNone}) {
|
||||||
DestroyDB(db_name_, Options());
|
ASSERT_OK(DestroyDB(db_name_, Options()));
|
||||||
Options options;
|
Options options;
|
||||||
options.compaction_style = compaction_style;
|
options.compaction_style = compaction_style;
|
||||||
// L0: Snappy, L1: ZSTD, L2: Snappy
|
// L0: Snappy, L1: ZSTD, L2: Snappy
|
||||||
|
@ -458,7 +458,7 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {
|
||||||
options.listeners.emplace_back(collector);
|
options.listeners.emplace_back(collector);
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
DestroyDB(db_name_, options);
|
ASSERT_OK(DestroyDB(db_name_, options));
|
||||||
Status s = DB::Open(options, db_name_, &db);
|
Status s = DB::Open(options, db_name_, &db);
|
||||||
ASSERT_OK(s);
|
ASSERT_OK(s);
|
||||||
assert(db);
|
assert(db);
|
||||||
|
|
|
@ -764,6 +764,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksOnCompact) {
|
||||||
delete db_;
|
delete db_;
|
||||||
db_ = nullptr;
|
db_ = nullptr;
|
||||||
s = DestroyDB(dbname_, options);
|
s = DestroyDB(dbname_, options);
|
||||||
|
ASSERT_OK(s);
|
||||||
std::shared_ptr<mock::MockTableFactory> mock =
|
std::shared_ptr<mock::MockTableFactory> mock =
|
||||||
std::make_shared<mock::MockTableFactory>();
|
std::make_shared<mock::MockTableFactory>();
|
||||||
options.table_factory = mock;
|
options.table_factory = mock;
|
||||||
|
|
|
@ -1718,7 +1718,7 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
|
||||||
delete metadata_ptr_;
|
delete metadata_ptr_;
|
||||||
metadata_ptr_ = nullptr;
|
metadata_ptr_ = nullptr;
|
||||||
|
|
||||||
DestroyDB(export_files_dir, options);
|
ASSERT_OK(DestroyDB(export_files_dir, options));
|
||||||
|
|
||||||
ReopenWithColumnFamilies({"default", "yoyo"}, options);
|
ReopenWithColumnFamilies({"default", "yoyo"}, options);
|
||||||
|
|
||||||
|
|
|
@ -52,7 +52,7 @@ class ManualCompactionTest : public testing::Test {
|
||||||
// Get rid of any state from an old run.
|
// Get rid of any state from an old run.
|
||||||
dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath(
|
dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath(
|
||||||
"rocksdb_manual_compaction_test");
|
"rocksdb_manual_compaction_test");
|
||||||
DestroyDB(dbname_, Options());
|
EXPECT_OK(DestroyDB(dbname_, Options()));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string dbname_;
|
std::string dbname_;
|
||||||
|
@ -130,7 +130,7 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
|
||||||
|
|
||||||
delete options.compaction_filter;
|
delete options.compaction_filter;
|
||||||
delete db;
|
delete db;
|
||||||
DestroyDB(dbname_, options);
|
ASSERT_OK(DestroyDB(dbname_, options));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -186,7 +186,7 @@ TEST_F(ManualCompactionTest, Test) {
|
||||||
|
|
||||||
// close database
|
// close database
|
||||||
delete db;
|
delete db;
|
||||||
DestroyDB(dbname_, Options());
|
ASSERT_OK(DestroyDB(dbname_, Options()));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ManualCompactionTest, SkipLevel) {
|
TEST_F(ManualCompactionTest, SkipLevel) {
|
||||||
|
@ -298,7 +298,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
|
||||||
|
|
||||||
delete filter;
|
delete filter;
|
||||||
delete db;
|
delete db;
|
||||||
DestroyDB(dbname_, options);
|
ASSERT_OK(DestroyDB(dbname_, options));
|
||||||
}
|
}
|
||||||
|
|
||||||
} // anonymous namespace
|
} // anonymous namespace
|
||||||
|
|
|
@ -69,7 +69,7 @@ std::shared_ptr<DB> OpenDb(bool read_only = false) {
|
||||||
class PerfContextTest : public testing::Test {};
|
class PerfContextTest : public testing::Test {};
|
||||||
|
|
||||||
TEST_F(PerfContextTest, SeekIntoDeletion) {
|
TEST_F(PerfContextTest, SeekIntoDeletion) {
|
||||||
DestroyDB(kDbName, Options());
|
ASSERT_OK(DestroyDB(kDbName, Options()));
|
||||||
auto db = OpenDb();
|
auto db = OpenDb();
|
||||||
WriteOptions write_options;
|
WriteOptions write_options;
|
||||||
ReadOptions read_options;
|
ReadOptions read_options;
|
||||||
|
@ -205,7 +205,7 @@ TEST_F(PerfContextTest, StopWatchOverhead) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void ProfileQueries(bool enabled_time = false) {
|
void ProfileQueries(bool enabled_time = false) {
|
||||||
DestroyDB(kDbName, Options()); // Start this test with a fresh DB
|
ASSERT_OK(DestroyDB(kDbName, Options())); // Start this test with a fresh DB
|
||||||
|
|
||||||
auto db = OpenDb();
|
auto db = OpenDb();
|
||||||
|
|
||||||
|
@ -518,7 +518,7 @@ TEST_F(PerfContextTest, KeyComparisonCount) {
|
||||||
// starts to become linear to the input size.
|
// starts to become linear to the input size.
|
||||||
|
|
||||||
TEST_F(PerfContextTest, SeekKeyComparison) {
|
TEST_F(PerfContextTest, SeekKeyComparison) {
|
||||||
DestroyDB(kDbName, Options());
|
ASSERT_OK(DestroyDB(kDbName, Options()));
|
||||||
auto db = OpenDb();
|
auto db = OpenDb();
|
||||||
WriteOptions write_options;
|
WriteOptions write_options;
|
||||||
ReadOptions read_options;
|
ReadOptions read_options;
|
||||||
|
@ -652,7 +652,7 @@ TEST_F(PerfContextTest, ToString) {
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(PerfContextTest, MergeOperatorTime) {
|
TEST_F(PerfContextTest, MergeOperatorTime) {
|
||||||
DestroyDB(kDbName, Options());
|
ASSERT_OK(DestroyDB(kDbName, Options()));
|
||||||
DB* db;
|
DB* db;
|
||||||
Options options;
|
Options options;
|
||||||
options.create_if_missing = true;
|
options.create_if_missing = true;
|
||||||
|
@ -833,7 +833,7 @@ TEST_F(PerfContextTest, CPUTimer) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
DestroyDB(kDbName, Options());
|
ASSERT_OK(DestroyDB(kDbName, Options()));
|
||||||
auto db = OpenDb();
|
auto db = OpenDb();
|
||||||
WriteOptions write_options;
|
WriteOptions write_options;
|
||||||
ReadOptions read_options;
|
ReadOptions read_options;
|
||||||
|
|
|
@ -37,7 +37,8 @@ class WalManagerTest : public testing::Test {
|
||||||
table_cache_(NewLRUCache(50000, 16)),
|
table_cache_(NewLRUCache(50000, 16)),
|
||||||
write_buffer_manager_(db_options_.db_write_buffer_size),
|
write_buffer_manager_(db_options_.db_write_buffer_size),
|
||||||
current_log_number_(0) {
|
current_log_number_(0) {
|
||||||
env_.reset(MockEnv::Create(Env::Default())), DestroyDB(dbname_, Options());
|
env_.reset(MockEnv::Create(Env::Default()));
|
||||||
|
EXPECT_OK(DestroyDB(dbname_, Options()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Init() {
|
void Init() {
|
||||||
|
|
|
@ -171,7 +171,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
|
||||||
DB* db;
|
DB* db;
|
||||||
DBImpl* db_impl;
|
DBImpl* db_impl;
|
||||||
|
|
||||||
DestroyDB(dbname, options);
|
ASSERT_OK(DestroyDB(dbname, options));
|
||||||
|
|
||||||
DBOptions db_options(options);
|
DBOptions db_options(options);
|
||||||
ColumnFamilyOptions cf_options(options);
|
ColumnFamilyOptions cf_options(options);
|
||||||
|
@ -372,7 +372,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
|
||||||
ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());
|
ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());
|
||||||
|
|
||||||
delete db;
|
delete db;
|
||||||
DestroyDB(dbname, options);
|
ASSERT_OK(DestroyDB(dbname, options));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,7 +391,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
|
||||||
DB* db;
|
DB* db;
|
||||||
DBImpl* db_impl;
|
DBImpl* db_impl;
|
||||||
|
|
||||||
DestroyDB(dbname, options);
|
ASSERT_OK(DestroyDB(dbname, options));
|
||||||
|
|
||||||
options.create_if_missing = true;
|
options.create_if_missing = true;
|
||||||
Status s = DB::Open(options, dbname, &db);
|
Status s = DB::Open(options, dbname, &db);
|
||||||
|
@ -441,7 +441,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
|
||||||
ASSERT_EQ("value.a2", value);
|
ASSERT_EQ("value.a2", value);
|
||||||
|
|
||||||
delete db;
|
delete db;
|
||||||
DestroyDB(dbname, options);
|
ASSERT_OK(DestroyDB(dbname, options));
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
} // namespace ROCKSDB_NAMESPACE
|
||||||
|
|
|
@ -1078,6 +1078,15 @@ class IoctlFriendlyTmpdir {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// check if it's running test within a docker container, in which case, the
|
||||||
|
// file system inside `overlayfs` may not support FS_IOC_GETVERSION
|
||||||
|
// skip the tests
|
||||||
|
struct stat buffer;
|
||||||
|
if (stat("/.dockerenv", &buffer) == 0) {
|
||||||
|
is_supported_ = false;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
fprintf(stderr, "failed to find an ioctl-friendly temporary directory;"
|
fprintf(stderr, "failed to find an ioctl-friendly temporary directory;"
|
||||||
" specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n");
|
" specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n");
|
||||||
std::abort();
|
std::abort();
|
||||||
|
@ -1092,8 +1101,12 @@ class IoctlFriendlyTmpdir {
|
||||||
return dir_;
|
return dir_;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool is_supported() const { return is_supported_; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::string dir_;
|
std::string dir_;
|
||||||
|
|
||||||
|
bool is_supported_ = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifndef ROCKSDB_LITE
|
#ifndef ROCKSDB_LITE
|
||||||
|
@ -1102,8 +1115,10 @@ TEST_F(EnvPosixTest, PositionedAppend) {
|
||||||
EnvOptions options;
|
EnvOptions options;
|
||||||
options.use_direct_writes = true;
|
options.use_direct_writes = true;
|
||||||
options.use_mmap_writes = false;
|
options.use_mmap_writes = false;
|
||||||
IoctlFriendlyTmpdir ift;
|
std::string fname = test::PerThreadDBPath(env_, "positioned_append");
|
||||||
ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options));
|
SetupSyncPointsToMockDirectIO();
|
||||||
|
|
||||||
|
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, options));
|
||||||
const size_t kBlockSize = 4096;
|
const size_t kBlockSize = 4096;
|
||||||
const size_t kDataSize = kPageSize;
|
const size_t kDataSize = kPageSize;
|
||||||
// Write a page worth of 'a'
|
// Write a page worth of 'a'
|
||||||
|
@ -1119,7 +1134,7 @@ TEST_F(EnvPosixTest, PositionedAppend) {
|
||||||
|
|
||||||
// Verify the above
|
// Verify the above
|
||||||
std::unique_ptr<SequentialFile> seq_file;
|
std::unique_ptr<SequentialFile> seq_file;
|
||||||
ASSERT_OK(env_->NewSequentialFile(ift.name() + "/f", &seq_file, options));
|
ASSERT_OK(env_->NewSequentialFile(fname, &seq_file, options));
|
||||||
size_t scratch_len = kPageSize * 2;
|
size_t scratch_len = kPageSize * 2;
|
||||||
std::unique_ptr<char[]> scratch(new char[scratch_len]);
|
std::unique_ptr<char[]> scratch(new char[scratch_len]);
|
||||||
Slice result;
|
Slice result;
|
||||||
|
@ -1139,6 +1154,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
|
||||||
EnvOptions soptions;
|
EnvOptions soptions;
|
||||||
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
|
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
|
||||||
IoctlFriendlyTmpdir ift;
|
IoctlFriendlyTmpdir ift;
|
||||||
|
if (!ift.is_supported()) {
|
||||||
|
ROCKSDB_GTEST_BYPASS(
|
||||||
|
"FS_IOC_GETVERSION is not supported by the filesystem");
|
||||||
|
return;
|
||||||
|
}
|
||||||
std::string fname = ift.name() + "/testfile";
|
std::string fname = ift.name() + "/testfile";
|
||||||
std::unique_ptr<WritableFile> wfile;
|
std::unique_ptr<WritableFile> wfile;
|
||||||
ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
|
ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
|
||||||
|
@ -1181,13 +1201,13 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
|
||||||
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
||||||
TEST_P(EnvPosixTestWithParam, AllocateTest) {
|
TEST_P(EnvPosixTestWithParam, AllocateTest) {
|
||||||
if (env_ == Env::Default()) {
|
if (env_ == Env::Default()) {
|
||||||
IoctlFriendlyTmpdir ift;
|
SetupSyncPointsToMockDirectIO();
|
||||||
std::string fname = ift.name() + "/preallocate_testfile";
|
std::string fname = test::PerThreadDBPath(env_, "preallocate_testfile");
|
||||||
|
|
||||||
// Try fallocate in a file to see whether the target file system supports
|
// Try fallocate in a file to see whether the target file system supports
|
||||||
// it.
|
// it.
|
||||||
// Skip the test if fallocate is not supported.
|
// Skip the test if fallocate is not supported.
|
||||||
std::string fname_test_fallocate = ift.name() + "/preallocate_testfile_2";
|
std::string fname_test_fallocate =
|
||||||
|
test::PerThreadDBPath(env_, "preallocate_testfile_2");
|
||||||
int fd = -1;
|
int fd = -1;
|
||||||
do {
|
do {
|
||||||
fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
|
fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
|
||||||
|
@ -1277,6 +1297,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) {
|
||||||
|
|
||||||
// Create the files
|
// Create the files
|
||||||
IoctlFriendlyTmpdir ift;
|
IoctlFriendlyTmpdir ift;
|
||||||
|
if (!ift.is_supported()) {
|
||||||
|
ROCKSDB_GTEST_BYPASS(
|
||||||
|
"FS_IOC_GETVERSION is not supported by the filesystem");
|
||||||
|
return;
|
||||||
|
}
|
||||||
std::vector<std::string> fnames;
|
std::vector<std::string> fnames;
|
||||||
for (int i = 0; i < 1000; ++i) {
|
for (int i = 0; i < 1000; ++i) {
|
||||||
fnames.push_back(ift.name() + "/" + "testfile" + std::to_string(i));
|
fnames.push_back(ift.name() + "/" + "testfile" + std::to_string(i));
|
||||||
|
@ -1318,6 +1343,11 @@ TEST_P(EnvPosixTestWithParam, DISABLED_RandomAccessUniqueIDDeletes) {
|
||||||
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
|
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
|
||||||
|
|
||||||
IoctlFriendlyTmpdir ift;
|
IoctlFriendlyTmpdir ift;
|
||||||
|
if (!ift.is_supported()) {
|
||||||
|
ROCKSDB_GTEST_BYPASS(
|
||||||
|
"FS_IOC_GETVERSION is not supported by the filesystem");
|
||||||
|
return;
|
||||||
|
}
|
||||||
std::string fname = ift.name() + "/" + "testfile";
|
std::string fname = ift.name() + "/" + "testfile";
|
||||||
|
|
||||||
// Check that after file is deleted we don't get same ID again in a new
|
// Check that after file is deleted we don't get same ID again in a new
|
||||||
|
|
|
@ -48,7 +48,7 @@ class Status {
|
||||||
if (!checked_) {
|
if (!checked_) {
|
||||||
fprintf(stderr, "Failed to check Status %p\n", this);
|
fprintf(stderr, "Failed to check Status %p\n", this);
|
||||||
port::PrintStack();
|
port::PrintStack();
|
||||||
abort();
|
std::abort();
|
||||||
}
|
}
|
||||||
#endif // ROCKSDB_ASSERT_STATUS_CHECKED
|
#endif // ROCKSDB_ASSERT_STATUS_CHECKED
|
||||||
}
|
}
|
||||||
|
|
|
@ -608,7 +608,7 @@ InlineSkipList<Comparator>::FindRandomEntry() const {
|
||||||
}
|
}
|
||||||
// There is a special case where x could still be the head_
|
// There is a special case where x could still be the head_
|
||||||
// (note that the head_ contains no key).
|
// (note that the head_ contains no key).
|
||||||
return x == head_ ? head_->Next(0) : x;
|
return x == head_ && head_ != nullptr ? head_->Next(0) : x;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class Comparator>
|
template <class Comparator>
|
||||||
|
|
|
@ -194,7 +194,7 @@ TEST_F(ChargeWriteBufferTest, Basic) {
|
||||||
ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
|
ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
|
||||||
ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);
|
ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);
|
||||||
|
|
||||||
// Destory write buffer manger should free everything
|
// Destroy write buffer manger should free everything
|
||||||
wbf.reset();
|
wbf.reset();
|
||||||
ASSERT_EQ(cache->GetPinnedUsage(), 0);
|
ASSERT_EQ(cache->GetPinnedUsage(), 0);
|
||||||
}
|
}
|
||||||
|
|
|
@ -37,9 +37,12 @@ class SanityTest {
|
||||||
Options options = GetOptions();
|
Options options = GetOptions();
|
||||||
options.create_if_missing = true;
|
options.create_if_missing = true;
|
||||||
std::string dbname = path_ + Name();
|
std::string dbname = path_ + Name();
|
||||||
DestroyDB(dbname, options);
|
Status s = DestroyDB(dbname, options);
|
||||||
|
if (!s.ok()) {
|
||||||
|
return s;
|
||||||
|
}
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
Status s = DB::Open(options, dbname, &db);
|
s = DB::Open(options, dbname, &db);
|
||||||
std::unique_ptr<DB> db_guard(db);
|
std::unique_ptr<DB> db_guard(db);
|
||||||
if (!s.ok()) {
|
if (!s.ok()) {
|
||||||
return s;
|
return s;
|
||||||
|
|
|
@ -889,7 +889,7 @@ TEST_F(LdbCmdTest, LoadCFOptionsAndOverride) {
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
|
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
|
||||||
DestroyDB(dbname, opts);
|
ASSERT_OK(DestroyDB(dbname, opts));
|
||||||
ASSERT_OK(DB::Open(opts, dbname, &db));
|
ASSERT_OK(DB::Open(opts, dbname, &db));
|
||||||
|
|
||||||
ColumnFamilyHandle* cf_handle;
|
ColumnFamilyHandle* cf_handle;
|
||||||
|
@ -932,7 +932,7 @@ TEST_F(LdbCmdTest, UnsafeRemoveSstFile) {
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
std::string dbname = test::PerThreadDBPath(Env::Default(), "ldb_cmd_test");
|
std::string dbname = test::PerThreadDBPath(Env::Default(), "ldb_cmd_test");
|
||||||
DestroyDB(dbname, opts);
|
ASSERT_OK(DestroyDB(dbname, opts));
|
||||||
ASSERT_OK(DB::Open(opts, dbname, &db));
|
ASSERT_OK(DB::Open(opts, dbname, &db));
|
||||||
|
|
||||||
// Create three SST files
|
// Create three SST files
|
||||||
|
@ -1041,7 +1041,7 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) {
|
||||||
|
|
||||||
DB* db = nullptr;
|
DB* db = nullptr;
|
||||||
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
|
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
|
||||||
DestroyDB(dbname, opts);
|
ASSERT_OK(DestroyDB(dbname, opts));
|
||||||
ASSERT_OK(DB::Open(opts, dbname, &db));
|
ASSERT_OK(DB::Open(opts, dbname, &db));
|
||||||
|
|
||||||
std::array<Temperature, 5> kTestTemps = {
|
std::array<Temperature, 5> kTestTemps = {
|
||||||
|
@ -1123,8 +1123,8 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) {
|
||||||
|
|
||||||
std::string old_dbname = test::PerThreadDBPath(env, "ldb_cmd_test");
|
std::string old_dbname = test::PerThreadDBPath(env, "ldb_cmd_test");
|
||||||
std::string new_dbname = old_dbname + "_2";
|
std::string new_dbname = old_dbname + "_2";
|
||||||
DestroyDB(old_dbname, opts);
|
ASSERT_OK(DestroyDB(old_dbname, opts));
|
||||||
DestroyDB(new_dbname, opts);
|
ASSERT_OK(DestroyDB(new_dbname, opts));
|
||||||
|
|
||||||
char old_arg[1024];
|
char old_arg[1024];
|
||||||
snprintf(old_arg, sizeof(old_arg), "--db=%s", old_dbname.c_str());
|
snprintf(old_arg, sizeof(old_arg), "--db=%s", old_dbname.c_str());
|
||||||
|
@ -1168,7 +1168,7 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) {
|
||||||
0, LDBCommandRunner::RunCommand(5, argv4, opts, LDBOptions(), nullptr));
|
0, LDBCommandRunner::RunCommand(5, argv4, opts, LDBOptions(), nullptr));
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
0, LDBCommandRunner::RunCommand(5, argv5, opts, LDBOptions(), nullptr));
|
0, LDBCommandRunner::RunCommand(5, argv5, opts, LDBOptions(), nullptr));
|
||||||
DestroyDB(new_dbname, opts);
|
ASSERT_OK(DestroyDB(new_dbname, opts));
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
} // namespace ROCKSDB_NAMESPACE
|
||||||
|
|
|
@ -22,7 +22,7 @@ class ReduceLevelTest : public testing::Test {
|
||||||
public:
|
public:
|
||||||
ReduceLevelTest() {
|
ReduceLevelTest() {
|
||||||
dbname_ = test::PerThreadDBPath("db_reduce_levels_test");
|
dbname_ = test::PerThreadDBPath("db_reduce_levels_test");
|
||||||
DestroyDB(dbname_, Options());
|
EXPECT_OK(DestroyDB(dbname_, Options()));
|
||||||
db_ = nullptr;
|
db_ = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -657,7 +657,7 @@ class BackupEngineTest : public testing::Test {
|
||||||
engine_options_->max_background_operations = 7;
|
engine_options_->max_background_operations = 7;
|
||||||
|
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// delete old LATEST_BACKUP file, which some tests create for compatibility
|
// delete old LATEST_BACKUP file, which some tests create for compatibility
|
||||||
// testing.
|
// testing.
|
||||||
|
@ -993,6 +993,12 @@ class BackupEngineTest : public testing::Test {
|
||||||
Options options_;
|
Options options_;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
void DestroyDBWithoutCheck(const std::string& dbname,
|
||||||
|
const Options& options) {
|
||||||
|
// DestroyDB may fail because the db might not be existed for some tests
|
||||||
|
DestroyDB(dbname, options).PermitUncheckedError();
|
||||||
|
}
|
||||||
|
|
||||||
std::unique_ptr<BackupEngineOptions> engine_options_;
|
std::unique_ptr<BackupEngineOptions> engine_options_;
|
||||||
}; // BackupEngineTest
|
}; // BackupEngineTest
|
||||||
|
|
||||||
|
@ -1033,7 +1039,7 @@ TEST_F(BackupEngineTest, FileCollision) {
|
||||||
|
|
||||||
// If the db directory has been cleaned up, it is sensitive to file
|
// If the db directory has been cleaned up, it is sensitive to file
|
||||||
// collision.
|
// collision.
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// open fresh DB, but old backups present
|
// open fresh DB, but old backups present
|
||||||
OpenDBAndBackupEngine(false /* destroy_old_data */, false /* dummy */,
|
OpenDBAndBackupEngine(false /* destroy_old_data */, false /* dummy */,
|
||||||
|
@ -1054,7 +1060,7 @@ TEST_F(BackupEngineTest, FileCollision) {
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
// delete old data
|
// delete old data
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1099,7 +1105,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) {
|
||||||
// second iter -- don't flush before backup
|
// second iter -- don't flush before backup
|
||||||
for (int iter = 0; iter < 2; ++iter) {
|
for (int iter = 0; iter < 2; ++iter) {
|
||||||
// delete old data
|
// delete old data
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
bool destroy_data = true;
|
bool destroy_data = true;
|
||||||
|
|
||||||
// every iteration --
|
// every iteration --
|
||||||
|
@ -1118,7 +1124,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) {
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), iter == 0))
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), iter == 0))
|
||||||
<< "iter: " << iter << ", idx: " << i;
|
<< "iter: " << iter << ", idx: " << i;
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// ---- make sure it's empty ----
|
// ---- make sure it's empty ----
|
||||||
DB* db = OpenDB();
|
DB* db = OpenDB();
|
||||||
|
@ -1146,7 +1152,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) {
|
||||||
const int max_key = keys_iteration * 4 + 10;
|
const int max_key = keys_iteration * 4 + 10;
|
||||||
Random rnd(7);
|
Random rnd(7);
|
||||||
// delete old data
|
// delete old data
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// TODO: Implement & test db_paths support in backup (not supported in
|
// TODO: Implement & test db_paths support in backup (not supported in
|
||||||
// restore)
|
// restore)
|
||||||
|
@ -1171,7 +1177,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) {
|
||||||
}
|
}
|
||||||
// close and destroy
|
// close and destroy
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// ---- make sure it's empty ----
|
// ---- make sure it's empty ----
|
||||||
DB* db = OpenDB();
|
DB* db = OpenDB();
|
||||||
|
@ -1547,7 +1553,7 @@ TEST_F(BackupEngineTest, TableFileCorruptedBeforeBackup) {
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// Enable table file checksum in DB manifest
|
// Enable table file checksum in DB manifest
|
||||||
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
||||||
|
@ -1580,7 +1586,7 @@ TEST_F(BackupEngineTest, BlobFileCorruptedBeforeBackup) {
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// Enable file checksum in DB manifest
|
// Enable file checksum in DB manifest
|
||||||
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
||||||
|
@ -1614,7 +1620,7 @@ TEST_P(BackupEngineTestWithParam, TableFileCorruptedBeforeBackup) {
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// Enable table checksums in DB manifest
|
// Enable table checksums in DB manifest
|
||||||
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
||||||
|
@ -1643,7 +1649,7 @@ TEST_P(BackupEngineTestWithParam, BlobFileCorruptedBeforeBackup) {
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
// Enable blob file checksums in DB manifest
|
// Enable blob file checksums in DB manifest
|
||||||
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
|
||||||
|
@ -1695,7 +1701,7 @@ TEST_F(BackupEngineTest, TableFileWithoutDbChecksumCorruptedDuringBackup) {
|
||||||
|
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) {
|
TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) {
|
||||||
|
@ -1734,7 +1740,7 @@ TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) {
|
||||||
|
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
// delete old files in db
|
// delete old files in db
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2208,7 +2214,7 @@ TEST_F(BackupEngineTest, TableFileCorruptionBeforeIncremental) {
|
||||||
}
|
}
|
||||||
|
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2273,7 +2279,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
|
||||||
// Even though we have "the same" DB state as backup 1, we need
|
// Even though we have "the same" DB state as backup 1, we need
|
||||||
// to restore to recreate the same conditions as later restore.
|
// to restore to recreate the same conditions as later restore.
|
||||||
db_.reset();
|
db_.reset();
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
|
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
|
@ -2294,7 +2300,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
|
||||||
|
|
||||||
// Restore backup 1 (again)
|
// Restore backup 1 (again)
|
||||||
db_.reset();
|
db_.reset();
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
|
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
|
@ -2332,7 +2338,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
|
||||||
EXPECT_EQ(children.size(), 3U); // Another SST added
|
EXPECT_EQ(children.size(), 3U); // Another SST added
|
||||||
}
|
}
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
ASSERT_OK(DestroyDB(dbname_, options_));
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
|
||||||
}
|
}
|
||||||
|
@ -2609,7 +2615,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) {
|
||||||
// destroy old data
|
// destroy old data
|
||||||
Options options;
|
Options options;
|
||||||
options.env = special_env.get();
|
options.env = special_env.get();
|
||||||
DestroyDB(dbname_, options);
|
DestroyDBWithoutCheck(dbname_, options);
|
||||||
|
|
||||||
if (custom_rate_limiter) {
|
if (custom_rate_limiter) {
|
||||||
std::shared_ptr<RateLimiter> backup_rate_limiter =
|
std::shared_ptr<RateLimiter> backup_rate_limiter =
|
||||||
|
@ -2699,7 +2705,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
|
||||||
|
|
||||||
Options options;
|
Options options;
|
||||||
options.env = special_env.get();
|
options.env = special_env.get();
|
||||||
DestroyDB(dbname_, options);
|
DestroyDBWithoutCheck(dbname_, options);
|
||||||
// Rate limiter uses `CondVar::TimedWait()`, which does not have access to the
|
// Rate limiter uses `CondVar::TimedWait()`, which does not have access to the
|
||||||
// `Env` to advance its time according to the fake wait duration. The
|
// `Env` to advance its time according to the fake wait duration. The
|
||||||
// workaround is to install a callback that advance the `Env`'s mock time.
|
// workaround is to install a callback that advance the `Env`'s mock time.
|
||||||
|
@ -2743,7 +2749,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
|
||||||
|
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
AssertBackupConsistency(backup_id, 0, 10000, 10010);
|
AssertBackupConsistency(backup_id, 0, 10000, 10010);
|
||||||
DestroyDB(dbname_, options);
|
DestroyDBWithoutCheck(dbname_, options);
|
||||||
|
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
|
||||||
|
@ -2760,7 +2766,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) {
|
||||||
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
|
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
|
||||||
engine_options_->backup_rate_limiter = backup_rate_limiter;
|
engine_options_->backup_rate_limiter = backup_rate_limiter;
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
|
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
|
||||||
kShareWithChecksum /* shared_option */);
|
kShareWithChecksum /* shared_option */);
|
||||||
FillDB(db_.get(), 0, 10);
|
FillDB(db_.get(), 0, 10);
|
||||||
|
@ -2784,7 +2790,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) {
|
||||||
total_bytes_through_with_no_read_charged);
|
total_bytes_through_with_no_read_charged);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
AssertBackupConsistency(1, 0, 10, 20);
|
AssertBackupConsistency(1, 0, 10, 20);
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
|
TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
|
||||||
|
@ -2798,20 +2804,20 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
|
||||||
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
|
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
|
||||||
engine_options_->restore_rate_limiter = restore_rate_limiter;
|
engine_options_->restore_rate_limiter = restore_rate_limiter;
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
OpenDBAndBackupEngine(true /* destroy_old_data */);
|
OpenDBAndBackupEngine(true /* destroy_old_data */);
|
||||||
FillDB(db_.get(), 0, 10);
|
FillDB(db_.get(), 0, 10);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
|
||||||
false /* flush_before_backup */));
|
false /* flush_before_backup */));
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
|
|
||||||
OpenBackupEngine(false /* destroy_old_data */);
|
OpenBackupEngine(false /* destroy_old_data */);
|
||||||
ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_));
|
ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_));
|
||||||
std::int64_t total_bytes_through_with_no_read_charged =
|
std::int64_t total_bytes_through_with_no_read_charged =
|
||||||
restore_rate_limiter->GetTotalBytesThrough();
|
restore_rate_limiter->GetTotalBytesThrough();
|
||||||
CloseBackupEngine();
|
CloseBackupEngine();
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
|
|
||||||
restore_rate_limiter.reset(NewGenericRateLimiter(
|
restore_rate_limiter.reset(NewGenericRateLimiter(
|
||||||
restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */,
|
restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */,
|
||||||
|
@ -2826,7 +2832,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
|
||||||
total_bytes_through_with_no_read_charged * 2);
|
total_bytes_through_with_no_read_charged * 2);
|
||||||
CloseBackupEngine();
|
CloseBackupEngine();
|
||||||
AssertBackupConsistency(1, 0, 10, 20);
|
AssertBackupConsistency(1, 0, 10, 20);
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_P(BackupEngineRateLimitingTestWithParam,
|
TEST_P(BackupEngineRateLimitingTestWithParam,
|
||||||
|
@ -2840,7 +2846,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam,
|
||||||
10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */));
|
10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */));
|
||||||
engine_options_->backup_rate_limiter = backup_rate_limiter;
|
engine_options_->backup_rate_limiter = backup_rate_limiter;
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
OpenDBAndBackupEngine(true /* destroy_old_data */);
|
OpenDBAndBackupEngine(true /* destroy_old_data */);
|
||||||
FillDB(db_.get(), 0, 10);
|
FillDB(db_.get(), 0, 10);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
|
||||||
|
@ -2857,7 +2863,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam,
|
||||||
EXPECT_GT(engine_options_->backup_rate_limiter->GetTotalBytesThrough(),
|
EXPECT_GT(engine_options_->backup_rate_limiter->GetTotalBytesThrough(),
|
||||||
total_bytes_through_before_initialize);
|
total_bytes_through_before_initialize);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
}
|
}
|
||||||
|
|
||||||
class BackupEngineRateLimitingTestWithParam2
|
class BackupEngineRateLimitingTestWithParam2
|
||||||
|
@ -2908,7 +2914,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
|
||||||
});
|
});
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
|
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
|
||||||
kShareWithChecksum /* shared_option */);
|
kShareWithChecksum /* shared_option */);
|
||||||
|
|
||||||
|
@ -2954,7 +2960,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
|
||||||
total_bytes_through_before_initialize);
|
total_bytes_through_before_initialize);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
OpenBackupEngine(false /* destroy_old_data */);
|
OpenBackupEngine(false /* destroy_old_data */);
|
||||||
int64_t total_bytes_through_before_restore =
|
int64_t total_bytes_through_before_restore =
|
||||||
engine_options_->restore_rate_limiter->GetTotalBytesThrough();
|
engine_options_->restore_rate_limiter->GetTotalBytesThrough();
|
||||||
|
@ -2965,7 +2971,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
|
||||||
total_bytes_through_before_restore);
|
total_bytes_through_before_restore);
|
||||||
CloseBackupEngine();
|
CloseBackupEngine();
|
||||||
|
|
||||||
DestroyDB(dbname_, Options());
|
DestroyDBWithoutCheck(dbname_, Options());
|
||||||
|
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
|
||||||
|
@ -2975,14 +2981,14 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
|
||||||
#endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
|
#endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, ReadOnlyBackupEngine) {
|
TEST_F(BackupEngineTest, ReadOnlyBackupEngine) {
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
FillDB(db_.get(), 0, 100);
|
FillDB(db_.get(), 0, 100);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
||||||
FillDB(db_.get(), 100, 200);
|
FillDB(db_.get(), 100, 200);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
|
|
||||||
engine_options_->destroy_old_data = false;
|
engine_options_->destroy_old_data = false;
|
||||||
test_backup_fs_->ClearWrittenFiles();
|
test_backup_fs_->ClearWrittenFiles();
|
||||||
|
@ -3007,7 +3013,7 @@ TEST_F(BackupEngineTest, ReadOnlyBackupEngine) {
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
|
TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
options_.write_dbid_to_manifest = false;
|
options_.write_dbid_to_manifest = false;
|
||||||
|
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
|
@ -3020,7 +3026,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
|
||||||
FillDB(db_.get(), 100, 200);
|
FillDB(db_.get(), 100, 200);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), /*flush*/ false));
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), /*flush*/ false));
|
||||||
db_.reset(); // CloseDB
|
db_.reset(); // CloseDB
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
BackupInfo backup_info;
|
BackupInfo backup_info;
|
||||||
// First, check that we get empty fields without include_file_details
|
// First, check that we get empty fields without include_file_details
|
||||||
ASSERT_OK(backup_engine_->GetBackupInfo(/*id*/ 1U, &backup_info,
|
ASSERT_OK(backup_engine_->GetBackupInfo(/*id*/ 1U, &backup_info,
|
||||||
|
@ -3073,7 +3079,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) {
|
TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) {
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
// Too big for this small DB
|
// Too big for this small DB
|
||||||
engine_options_->callback_trigger_interval_size = 100000;
|
engine_options_->callback_trigger_interval_size = 100000;
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
|
@ -3093,11 +3099,11 @@ TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) {
|
||||||
[&is_callback_invoked]() { is_callback_invoked = true; }));
|
[&is_callback_invoked]() { is_callback_invoked = true; }));
|
||||||
ASSERT_TRUE(is_callback_invoked);
|
ASSERT_TRUE(is_callback_invoked);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, GarbageCollectionBeforeBackup) {
|
TEST_F(BackupEngineTest, GarbageCollectionBeforeBackup) {
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
|
|
||||||
ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared"));
|
ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared"));
|
||||||
|
@ -3151,7 +3157,7 @@ TEST_F(BackupEngineTest, EnvFailures) {
|
||||||
|
|
||||||
// Read from meta-file failure
|
// Read from meta-file failure
|
||||||
{
|
{
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
FillDB(db_.get(), 0, 100);
|
FillDB(db_.get(), 0, 100);
|
||||||
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
|
||||||
|
@ -3176,7 +3182,7 @@ TEST_F(BackupEngineTest, EnvFailures) {
|
||||||
// Verify manifest can roll while a backup is being created with the old
|
// Verify manifest can roll while a backup is being created with the old
|
||||||
// manifest.
|
// manifest.
|
||||||
TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) {
|
TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) {
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
options_.max_manifest_file_size = 0; // always rollover manifest for file add
|
options_.max_manifest_file_size = 0; // always rollover manifest for file add
|
||||||
OpenDBAndBackupEngine(true);
|
OpenDBAndBackupEngine(true);
|
||||||
FillDB(db_.get(), 0, 100, kAutoFlushOnly);
|
FillDB(db_.get(), 0, 100, kAutoFlushOnly);
|
||||||
|
@ -3213,7 +3219,7 @@ TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) {
|
||||||
ASSERT_TRUE(db_chroot_env_->FileExists(prev_manifest_path).IsNotFound());
|
ASSERT_TRUE(db_chroot_env_->FileExists(prev_manifest_path).IsNotFound());
|
||||||
|
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
AssertBackupConsistency(0, 0, 100);
|
AssertBackupConsistency(0, 0, 100);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3265,7 +3271,7 @@ TEST_F(BackupEngineTest, BackupWithMetadata) {
|
||||||
ASSERT_EQ(std::to_string(i), backup_info.app_metadata);
|
ASSERT_EQ(std::to_string(i), backup_info.app_metadata);
|
||||||
}
|
}
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, BinaryMetadata) {
|
TEST_F(BackupEngineTest, BinaryMetadata) {
|
||||||
|
@ -3283,7 +3289,7 @@ TEST_F(BackupEngineTest, BinaryMetadata) {
|
||||||
ASSERT_EQ(1, backup_infos.size());
|
ASSERT_EQ(1, backup_infos.size());
|
||||||
ASSERT_EQ(binaryMetadata, backup_infos[0].app_metadata);
|
ASSERT_EQ(binaryMetadata, backup_infos[0].app_metadata);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, MetadataTooLarge) {
|
TEST_F(BackupEngineTest, MetadataTooLarge) {
|
||||||
|
@ -3292,7 +3298,7 @@ TEST_F(BackupEngineTest, MetadataTooLarge) {
|
||||||
ASSERT_NOK(
|
ASSERT_NOK(
|
||||||
backup_engine_->CreateNewBackupWithMetadata(db_.get(), largeMetadata));
|
backup_engine_->CreateNewBackupWithMetadata(db_.get(), largeMetadata));
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, MetaSchemaVersion2_SizeCorruption) {
|
TEST_F(BackupEngineTest, MetaSchemaVersion2_SizeCorruption) {
|
||||||
|
@ -3734,7 +3740,7 @@ TEST_F(BackupEngineTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) {
|
||||||
ASSERT_EQ(2, backup_infos[1].backup_id);
|
ASSERT_EQ(2, backup_infos[1].backup_id);
|
||||||
ASSERT_EQ(4, backup_infos[2].backup_id);
|
ASSERT_EQ(4, backup_infos[2].backup_id);
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(BackupEngineTest, CreateWhenLatestBackupCorrupted) {
|
TEST_F(BackupEngineTest, CreateWhenLatestBackupCorrupted) {
|
||||||
|
@ -3942,7 +3948,7 @@ TEST_F(BackupEngineTest, BackgroundThreadCpuPriority) {
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
|
||||||
CloseDBAndBackupEngine();
|
CloseDBAndBackupEngine();
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDBWithoutCheck(dbname_, options_);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Populates `*total_size` with the size of all files under `backup_dir`.
|
// Populates `*total_size` with the size of all files under `backup_dir`.
|
||||||
|
|
|
@ -122,7 +122,8 @@ private:
|
||||||
class CassandraFunctionalTest : public testing::Test {
|
class CassandraFunctionalTest : public testing::Test {
|
||||||
public:
|
public:
|
||||||
CassandraFunctionalTest() {
|
CassandraFunctionalTest() {
|
||||||
DestroyDB(kDbName, Options()); // Start each test with a fresh DB
|
EXPECT_OK(
|
||||||
|
DestroyDB(kDbName, Options())); // Start each test with a fresh DB
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<DB> OpenDb() {
|
std::shared_ptr<DB> OpenDb() {
|
||||||
|
|
|
@ -39,7 +39,7 @@ class RangeLockingTest : public ::testing::Test {
|
||||||
options.create_if_missing = true;
|
options.create_if_missing = true;
|
||||||
dbname = test::PerThreadDBPath("range_locking_testdb");
|
dbname = test::PerThreadDBPath("range_locking_testdb");
|
||||||
|
|
||||||
DestroyDB(dbname, options);
|
EXPECT_OK(DestroyDB(dbname, options));
|
||||||
|
|
||||||
range_lock_mgr.reset(NewRangeLockManager(nullptr));
|
range_lock_mgr.reset(NewRangeLockManager(nullptr));
|
||||||
txn_db_options.lock_mgr_handle = range_lock_mgr;
|
txn_db_options.lock_mgr_handle = range_lock_mgr;
|
||||||
|
@ -55,7 +55,7 @@ class RangeLockingTest : public ::testing::Test {
|
||||||
// seems to be a bug in btrfs that the makes readdir return recently
|
// seems to be a bug in btrfs that the makes readdir return recently
|
||||||
// unlink-ed files. By using the default fs we simply ignore errors resulted
|
// unlink-ed files. By using the default fs we simply ignore errors resulted
|
||||||
// from attempting to delete such files in DestroyDB.
|
// from attempting to delete such files in DestroyDB.
|
||||||
DestroyDB(dbname, options);
|
EXPECT_OK(DestroyDB(dbname, options));
|
||||||
}
|
}
|
||||||
|
|
||||||
PessimisticTransaction* NewTxn(
|
PessimisticTransaction* NewTxn(
|
||||||
|
|
|
@ -39,12 +39,12 @@ class OptimisticTransactionTest
|
||||||
options.merge_operator.reset(new TestPutOperator());
|
options.merge_operator.reset(new TestPutOperator());
|
||||||
dbname = test::PerThreadDBPath("optimistic_transaction_testdb");
|
dbname = test::PerThreadDBPath("optimistic_transaction_testdb");
|
||||||
|
|
||||||
DestroyDB(dbname, options);
|
EXPECT_OK(DestroyDB(dbname, options));
|
||||||
Open();
|
Open();
|
||||||
}
|
}
|
||||||
~OptimisticTransactionTest() override {
|
~OptimisticTransactionTest() override {
|
||||||
delete txn_db;
|
delete txn_db;
|
||||||
DestroyDB(dbname, options);
|
EXPECT_OK(DestroyDB(dbname, options));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Reopen() {
|
void Reopen() {
|
||||||
|
|
|
@ -57,12 +57,12 @@ class TtlTest : public testing::Test {
|
||||||
options_.max_compaction_bytes = 1;
|
options_.max_compaction_bytes = 1;
|
||||||
// compaction should take place always from level0 for determinism
|
// compaction should take place always from level0 for determinism
|
||||||
db_ttl_ = nullptr;
|
db_ttl_ = nullptr;
|
||||||
DestroyDB(dbname_, Options());
|
EXPECT_OK(DestroyDB(dbname_, Options()));
|
||||||
}
|
}
|
||||||
|
|
||||||
~TtlTest() override {
|
~TtlTest() override {
|
||||||
CloseTtl();
|
CloseTtl();
|
||||||
DestroyDB(dbname_, Options());
|
EXPECT_OK(DestroyDB(dbname_, Options()));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open database with TTL support when TTL not provided with db_ttl_ pointer
|
// Open database with TTL support when TTL not provided with db_ttl_ pointer
|
||||||
|
|
|
@ -246,7 +246,7 @@ class WBWIBaseTest : public testing::Test {
|
||||||
MergeOperators::CreateFromStringId("stringappend");
|
MergeOperators::CreateFromStringId("stringappend");
|
||||||
options_.create_if_missing = true;
|
options_.create_if_missing = true;
|
||||||
dbname_ = test::PerThreadDBPath("write_batch_with_index_test");
|
dbname_ = test::PerThreadDBPath("write_batch_with_index_test");
|
||||||
DestroyDB(dbname_, options_);
|
EXPECT_OK(DestroyDB(dbname_, options_));
|
||||||
batch_.reset(new WriteBatchWithIndex(BytewiseComparator(), 20, overwrite));
|
batch_.reset(new WriteBatchWithIndex(BytewiseComparator(), 20, overwrite));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -254,7 +254,7 @@ class WBWIBaseTest : public testing::Test {
|
||||||
if (db_ != nullptr) {
|
if (db_ != nullptr) {
|
||||||
ReleaseSnapshot();
|
ReleaseSnapshot();
|
||||||
delete db_;
|
delete db_;
|
||||||
DestroyDB(dbname_, options_);
|
EXPECT_OK(DestroyDB(dbname_, options_));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue