Compare commits

110 commits: siying-pat...main

SHA1: f6339de0d2, b11ff347b4, 3f263ef536, e66e6d2faa, 204a42ca97, c4cd8e1acc, b58a1a035b, f6d9730ea1, bfc6a8ee4a, cdaa9576bb, 07c6807113, bcb1287235, 6442a62e46, e96e8e2d05, e943bbdd2f, e8d604cf85, 26768edb65, 0d1613aad6, e78451f3f6, 7b7a37c069, c5c58708db, 4527bb2fed, 89571b30e5, 39b6c5791a, 9f7968b2ed, 7b55b50839, d1cc91c142, 9381436bf3, e03d958b91, 00889cf8f2, 736a7b5433, 62d84e2a2b, b7aaa98762, a62506aee2, 49628c9a83, 46f8889b6a, e62c23cce4, 9d634dd5b6, 68ac507f96, bf68d1c93d, 95663ff763, de537dcaf1, 270179bb12, 8b74cea7fe, b82edffc7b, 440c7f6306, bb87164db3, 4b9a1a2f56, b6ec3328af, 2b5df21e95, cda34dd64a, 06394ff4e7, 37f490834d, 2b5c29f9f3, aafb377bb5, fce65e7e4f, 94e245a14d, d9d456de49, 68ee228dec, 9d0cae7104, ce2d8a4239, 1eb279dcce, c5d367f472, 3653029dda, 6d2577e567, fb9a167a55, 5bd374b392, ac29645743, fff28a7725, d13825e586, 1bac873fcf, 2ea4205a69, 9b5790f018, a5063c8931, 01fdec23fe, 682fc8ba6a, bbf5867353, 690f1edf37, 1601433b3a, e83c55439a, 41237dd306, 3d473235d4, 673ada8225, 4f9c0fd083, db536ee045, be81609b43, 0c7f455f85, d5dfa8c6fe, e91ec64cac, 082eb04200, fe63899d1a, 0bd4dcde6b, 844a35108b, d6e016be6d, fefacd33e3, b3a6fb7e86, 5645207758, dc1c90c4e3, 9454e744ed, 7c7df1850a, efd035164b, f934a0af46, 1eee99fc8c, 0b81efed1d, ae82d91492, 63e68a4e77, b7db7eae26, f241d082b6, f3bcac39a6, 0ad9ee30ce
.circleci/config.yml

@@ -2,7 +2,6 @@ version: 2.1
 
 orbs:
   win: circleci/windows@2.4.0
-  slack: circleci/slack@3.4.2
 
 aliases:
   - &notify-on-main-failure
@@ -57,7 +56,6 @@ commands:
 
   post-steps:
     steps:
-      - slack/status: *notify-on-main-failure
      - store_test_results: # store test result if there's any
          path: /tmp/test-results
      - store_artifacts: # store LOG for debugging if there's any
@@ -113,6 +111,12 @@ commands:
        cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0
        ninja && sudo ninja install
 
+  install-valgrind:
+    steps:
+      - run:
+          name: Install valgrind
+          command: sudo apt-get update -y && sudo apt-get install -y valgrind
+
  upgrade-cmake:
    steps:
      - run:
@@ -176,7 +180,7 @@ jobs:
      - increase-max-open-files-on-macos
      - install-gflags-on-macos
      - pre-steps-macos
-      - run: ulimit -S -n 1048576 && OPT=-DCIRCLECI make V=1 J=32 -j32 all
+      - run: ulimit -S -n `ulimit -H -n` && OPT=-DCIRCLECI make V=1 J=32 -j32 all
      - post-steps
 
  build-macos-cmake:
@@ -195,7 +199,7 @@ jobs:
      - pre-steps-macos
      - run:
          name: "cmake generate project file"
-          command: ulimit -S -n 1048576 && mkdir build && cd build && cmake -DWITH_GFLAGS=1 ..
+          command: ulimit -S -n `ulimit -H -n` && mkdir build && cd build && cmake -DWITH_GFLAGS=1 ..
      - run:
          name: "Build tests"
          command: cd build && make V=1 -j32
@@ -204,14 +208,14 @@ jobs:
        steps:
          - run:
              name: "Run even tests"
-              command: ulimit -S -n 1048576 && cd build && ctest -j32 -I 0,,2
+              command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 0,,2
      - when:
          condition:
            not: << parameters.run_even_tests >>
          steps:
            - run:
                name: "Run odd tests"
-                command: ulimit -S -n 1048576 && cd build && ctest -j32 -I 1,,2
+                command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 1,,2
      - post-steps
 
  build-linux:
@@ -224,14 +228,16 @@ jobs:
      - run: make V=1 J=32 -j32 check
      - post-steps
 
-  build-linux-encrypted-env:
+  build-linux-encrypted_env-no_compression:
    machine:
      image: ubuntu-2004:202111-02
    resource_class: 2xlarge
    steps:
      - pre-steps
      - install-gflags
-      - run: ENCRYPTED_ENV=1 make V=1 J=32 -j32 check
+      - run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
+      - run: |
+          ./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
      - post-steps
 
  build-linux-shared_lib-alt_namespace-status_checked:
@@ -312,7 +318,7 @@ jobs:
      - pre-steps
      - install-gflags
      - install-clang-10
-      - run: ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
+      - run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
      - post-steps
 
  build-linux-clang10-mini-tsan:
@@ -356,6 +362,17 @@ jobs:
      - run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
      - post-steps
 
+  build-linux-valgrind:
+    machine:
+      image: ubuntu-2004:202111-02
+    resource_class: 2xlarge
+    steps:
+      - pre-steps
+      - install-gflags
+      - install-valgrind
+      - run: PORTABLE=1 make V=1 -j32 valgrind_test
+      - post-steps
+
  build-linux-clang10-clang-analyze:
    machine:
      image: ubuntu-2004:202111-02
@@ -368,7 +385,7 @@ jobs:
      - run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for reason we haven't figured out. For unknown, reason passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
      - post-steps
 
-  build-linux-cmake:
+  build-linux-cmake-with-folly:
    machine:
      image: ubuntu-2004:202111-02
    resource_class: 2xlarge
@@ -376,10 +393,11 @@ jobs:
      - pre-steps
      - install-gflags
      - upgrade-cmake
-      - run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
+      - run: make checkout_folly
+      - run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
      - post-steps
 
-  build-linux-cmake-ubuntu-20:
+  build-linux-cmake-with-benchmark:
    machine:
      image: ubuntu-2004:202111-02
    resource_class: 2xlarge
@@ -393,22 +411,25 @@ jobs:
  build-linux-unity-and-headers:
    docker: # executor type
      - image: gcc:latest
+    environment:
+      EXTRA_CXXFLAGS: -mno-avx512f # Warnings-as-error in avx512fintrin.h, would be used on newer hardware
    resource_class: large
    steps:
      - checkout # check out the code in the project directory
      - run: apt-get update -y && apt-get install -y libgflags-dev
-      - run: TEST_TMPDIR=/dev/shm && make V=1 -j8 unity_test
+      - run: make V=1 -j8 unity_test
      - run: make V=1 -j8 -k check-headers # could be moved to a different build
      - post-steps
 
-  build-linux-gcc-7:
+  build-linux-gcc-7-with-folly:
    machine:
      image: ubuntu-2004:202111-02
    resource_class: 2xlarge
    steps:
      - pre-steps
      - run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-7 g++-7 libgflags-dev
-      - run: CC=gcc-7 CXX=g++-7 V=1 make -j32 check
+      - run: make checkout_folly
+      - run: USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
      - post-steps
 
  build-linux-gcc-8-no_test_run:
@@ -453,6 +474,19 @@ jobs:
      - run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j16 all microbench
      - post-steps
 
+  # Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
+  build-linux-clang-13-asan-ubsan-with-folly:
+    machine:
+      image: ubuntu-2004:202111-02
+    resource_class: 2xlarge
+    steps:
+      - pre-steps
+      - install-clang-13
+      - install-gflags
+      - run: make checkout_folly
+      - run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
+      - post-steps
+
  # This job is only to make sure the microbench tests are able to run, the benchmark result is not meaningful as the CI host is changing.
  build-linux-run-microbench:
    machine:
@@ -472,7 +506,7 @@ jobs:
      - pre-steps
      - install-gflags
      - install-compression-libs
-      - run: make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
+      - run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
      - post-steps
 
  build-windows:
@@ -794,108 +828,76 @@ jobs:
 
 workflows:
  version: 2
-  build-linux:
+  jobs-linux-run-tests:
    jobs:
      - build-linux
-  build-linux-cmake:
-    jobs:
-      - build-linux-cmake
-      - build-linux-cmake-ubuntu-20
-  build-linux-encrypted-env:
-    jobs:
-      - build-linux-encrypted-env
-  build-linux-shared_lib-alt_namespace-status_checked:
-    jobs:
-      - build-linux-shared_lib-alt_namespace-status_checked
-  build-linux-lite:
-    jobs:
+      - build-linux-cmake-with-folly
+      - build-linux-gcc-7-with-folly
+      - build-linux-cmake-with-benchmark
+      - build-linux-encrypted_env-no_compression
      - build-linux-lite
-  build-linux-release:
-    jobs:
-      - build-linux-release
-  build-linux-release-rtti:
-    jobs:
-      - build-linux-release-rtti
-  build-linux-lite-release:
-    jobs:
-      - build-linux-lite-release
-  build-linux-clang10-asan:
+  jobs-linux-run-tests-san:
    jobs:
      - build-linux-clang10-asan
-  build-linux-clang10-mini-tsan:
-    jobs:
+      - build-linux-clang10-ubsan
      - build-linux-clang10-mini-tsan:
          start_test: ""
          end_test: "env_test"
      - build-linux-clang10-mini-tsan:
          start_test: "env_test"
          end_test: ""
-  build-linux-clang10-ubsan:
+      - build-linux-shared_lib-alt_namespace-status_checked
+  jobs-linux-no-test-run:
    jobs:
-      - build-linux-clang10-ubsan
-  build-linux-clang10-clang-analyze:
+      - build-linux-release
+      - build-linux-release-rtti
+      - build-linux-lite-release
+      - build-examples
+      - build-fuzzers
+      - build-linux-clang-no_test_run
+      - build-linux-clang-13-no_test_run
+      - build-linux-gcc-8-no_test_run
+      - build-linux-gcc-10-cxx20-no_test_run
+      - build-linux-gcc-11-no_test_run
+      - build-linux-arm-cmake-no_test_run
+  jobs-linux-other-checks:
    jobs:
      - build-linux-clang10-clang-analyze
-  build-linux-unity-and-headers:
-    jobs:
      - build-linux-unity-and-headers
-  build-linux-mini-crashtest:
-    jobs:
      - build-linux-mini-crashtest
-  build-windows-vs2019:
+  jobs-windows:
    jobs:
      - build-windows:
          name: "build-windows-vs2019"
-  build-windows-vs2019-cxx20:
-    jobs:
      - build-windows:
          name: "build-windows-vs2019-cxx20"
          extra_cmake_opt: -DCMAKE_CXX_STANDARD=20
-  build-windows-vs2017:
-    jobs:
      - build-windows:
          name: "build-windows-vs2017"
          vs_year: "2017"
          cmake_generator: "Visual Studio 15 Win64"
-  build-java:
+      - build-cmake-mingw
+  jobs-java:
    jobs:
      - build-linux-java
      - build-linux-java-static
      - build-macos-java
      - build-macos-java-static
      - build-macos-java-static-universal
-  build-examples:
-    jobs:
-      - build-examples
-  build-linux-compilers-no_test_run:
-    jobs:
-      - build-linux-clang-no_test_run
-      - build-linux-clang-13-no_test_run
-      - build-linux-gcc-7
-      - build-linux-gcc-8-no_test_run
-      - build-linux-gcc-10-cxx20-no_test_run
-      - build-linux-gcc-11-no_test_run
-      - build-linux-arm-cmake-no_test_run
-  build-macos:
+  jobs-macos:
    jobs:
      - build-macos
      - build-macos-cmake:
          run_even_tests: true
      - build-macos-cmake:
          run_even_tests: false
-  build-cmake-mingw:
-    jobs:
-      - build-cmake-mingw
-  build-linux-arm:
+  jobs-linux-arm:
    jobs:
      - build-linux-arm
-  build-fuzzers:
-    jobs:
-      - build-fuzzers
  nightly:
    triggers:
      - schedule:
-          cron: "0 0 * * *"
+          cron: "0 9 * * *"
          filters:
            branches:
              only:
@@ -905,3 +907,5 @@ workflows:
      - build-linux-arm-test-full
      - build-linux-run-microbench
      - build-linux-non-shm
+      - build-linux-clang-13-asan-ubsan-with-folly
+      - build-linux-valgrind
1 .gitignore (vendored)

@@ -95,3 +95,4 @@ fuzz/proto/gen/
 fuzz/crash-*
 
 cmake-build-*
+third-party/folly/
170 CMakeLists.txt

@@ -40,6 +40,8 @@ include(GoogleTest)
 get_rocksdb_version(rocksdb_VERSION)
 project(rocksdb
   VERSION ${rocksdb_VERSION}
+  DESCRIPTION "An embeddable persistent key-value store for fast storage"
+  HOMEPAGE_URL https://rocksdb.org/
   LANGUAGES CXX C ASM)
 
 if(POLICY CMP0042)
@@ -78,19 +80,6 @@ if ($ENV{CIRCLECI})
   add_definitions(-DCIRCLECI)
 endif()
 
-# third-party/folly is only validated to work on Linux and Windows for now.
-# So only turn it on there by default.
-if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
-  if(MSVC AND MSVC_VERSION LESS 1910)
-    # Folly does not compile with MSVC older than VS2017
-    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-  else()
-    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
-  endif()
-else()
-  option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-endif()
-
 if( NOT DEFINED CMAKE_CXX_STANDARD )
   set(CMAKE_CXX_STANDARD 17)
 endif()
@@ -182,26 +171,6 @@ else()
   endif()
 endif()
 
-string(TIMESTAMP TS "%Y-%m-%d %H:%M:%S" UTC)
-set(BUILD_DATE "${TS}" CACHE STRING "the time we first built rocksdb")
-
-find_package(Git)
-
-if(GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git")
-  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_SHA COMMAND "${GIT_EXECUTABLE}" rev-parse HEAD )
-  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" RESULT_VARIABLE GIT_MOD COMMAND "${GIT_EXECUTABLE}" diff-index HEAD --quiet)
-  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_DATE COMMAND "${GIT_EXECUTABLE}" log -1 --date=format:"%Y-%m-%d %T" --format="%ad")
-  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_TAG RESULT_VARIABLE rv COMMAND "${GIT_EXECUTABLE}" symbolic-ref -q --short HEAD OUTPUT_STRIP_TRAILING_WHITESPACE)
-  if (rv AND NOT rv EQUAL 0)
-    execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_TAG COMMAND "${GIT_EXECUTABLE}" describe --tags --exact-match OUTPUT_STRIP_TRAILING_WHITESPACE)
-  endif()
-else()
-  set(GIT_SHA 0)
-  set(GIT_MOD 1)
-endif()
-string(REGEX REPLACE "[^0-9a-fA-F]+" "" GIT_SHA "${GIT_SHA}")
-string(REGEX REPLACE "[^0-9: /-]+" "" GIT_DATE "${GIT_DATE}")
-
 option(WITH_MD_LIBRARY "build with MD" ON)
 if(WIN32 AND MSVC)
   if(WITH_MD_LIBRARY)
@@ -211,9 +180,6 @@ if(WIN32 AND MSVC)
   endif()
 endif()
 
-set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc)
-configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY)
-
 if(MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324")
@@ -456,30 +422,32 @@ if (ASSERT_STATUS_CHECKED)
   add_definitions(-DROCKSDB_ASSERT_STATUS_CHECKED)
 endif()
 
-if(DEFINED USE_RTTI)
-  if(USE_RTTI)
-    message(STATUS "Enabling RTTI")
-    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DROCKSDB_USE_RTTI")
-    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -DROCKSDB_USE_RTTI")
-  else()
-    if(MSVC)
-      message(STATUS "Disabling RTTI in Release builds. Always on in Debug.")
-      set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DROCKSDB_USE_RTTI")
-      set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GR-")
-    else()
-      message(STATUS "Disabling RTTI in Release builds")
-      set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-rtti")
-      set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fno-rtti")
-    endif()
-  endif()
-else()
+# RTTI is by default AUTO which enables it in debug and disables it in release.
+set(USE_RTTI AUTO CACHE STRING "Enable RTTI in builds")
+set_property(CACHE USE_RTTI PROPERTY STRINGS AUTO ON OFF)
+if(USE_RTTI STREQUAL "AUTO")
   message(STATUS "Enabling RTTI in Debug builds only (default)")
   set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DROCKSDB_USE_RTTI")
   if(MSVC)
     set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GR-")
   else()
     set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fno-rtti")
   endif()
+elseif(USE_RTTI)
+  message(STATUS "Enabling RTTI in all builds")
+  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DROCKSDB_USE_RTTI")
+  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -DROCKSDB_USE_RTTI")
+else()
+  if(MSVC)
+    message(STATUS "Disabling RTTI in Release builds. Always on in Debug.")
+    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DROCKSDB_USE_RTTI")
+    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GR-")
+  else()
+    message(STATUS "Disabling RTTI in all builds")
+    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-rtti")
+    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fno-rtti")
+  endif()
 endif()
 
 # Used to run CI build and tests so we can run faster
@@ -615,8 +583,9 @@ endif()
 
 include_directories(${PROJECT_SOURCE_DIR})
 include_directories(${PROJECT_SOURCE_DIR}/include)
-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
+if(USE_FOLLY)
   include_directories(${PROJECT_SOURCE_DIR}/third-party/folly)
+  add_definitions(-DUSE_FOLLY -DFOLLY_NO_CONFIG)
 endif()
 find_package(Threads REQUIRED)
 
@@ -628,8 +597,9 @@ set(SOURCES
   cache/cache_key.cc
   cache/cache_reservation_manager.cc
   cache/clock_cache.cc
+  cache/compressed_secondary_cache.cc
+  cache/fast_lru_cache.cc
   cache/lru_cache.cc
-  cache/lru_secondary_cache.cc
   cache/sharded_cache.cc
   db/arena_wrapped_db_iter.cc
   db/blob/blob_fetcher.cc
@@ -829,6 +799,7 @@ set(SOURCES
   trace_replay/trace_record_result.cc
   trace_replay/trace_record.cc
   trace_replay/trace_replay.cc
+  util/cleanable.cc
   util/coding.cc
   util/compaction_job_stats_impl.cc
   util/comparator.cc
@@ -849,6 +820,7 @@ set(SOURCES
   util/thread_local.cc
   util/threadpool_imp.cc
   util/xxhash.cc
+  utilities/agg_merge/agg_merge.cc
   utilities/backup/backup_engine.cc
   utilities/blob_db/blob_compaction_filter.cc
   utilities/blob_db/blob_db.cc
@@ -998,13 +970,12 @@ else()
     env/io_posix.cc)
 endif()
 
-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
+if(USE_FOLLY)
   list(APPEND SOURCES
-    third-party/folly/folly/detail/Futex.cpp
-    third-party/folly/folly/synchronization/AtomicNotification.cpp
-    third-party/folly/folly/synchronization/DistributedMutex.cpp
-    third-party/folly/folly/synchronization/ParkingLot.cpp
-    third-party/folly/folly/synchronization/WaitOptions.cpp)
+    third-party/folly/folly/container/detail/F14Table.cpp
+    third-party/folly/folly/lang/SafeAssert.cpp
+    third-party/folly/folly/lang/ToAscii.cpp
+    third-party/folly/folly/ScopeGuard.cpp)
 endif()
 
 set(ROCKSDB_STATIC_LIB rocksdb${ARTIFACT_SUFFIX})
@@ -1019,6 +990,60 @@ else()
   set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT})
 endif()
 
+set(ROCKSDB_PLUGIN_EXTERNS "")
+set(ROCKSDB_PLUGIN_BUILTINS "")
+message(STATUS "ROCKSDB PLUGINS TO BUILD ${ROCKSDB_PLUGINS}")
+list(APPEND PLUGINS ${ROCKSDB_PLUGINS})
+foreach(PLUGIN IN LISTS PLUGINS)
+  set(PLUGIN_ROOT "${CMAKE_SOURCE_DIR}/plugin/${PLUGIN}/")
+  message("including rocksb plugin ${PLUGIN_ROOT}")
+  set(PLUGINMKFILE "${PLUGIN_ROOT}${PLUGIN}.mk")
+  if (NOT EXISTS ${PLUGINMKFILE})
+    message(FATAL_ERROR "Missing plugin makefile: ${PLUGINMKFILE}")
+  endif()
+  file(READ ${PLUGINMKFILE} PLUGINMK)
+  string(REGEX MATCH "SOURCES = ([^\n]*)" FOO ${PLUGINMK})
+  set(MK_SOURCES ${CMAKE_MATCH_1})
+  separate_arguments(MK_SOURCES)
+  foreach(MK_FILE IN LISTS MK_SOURCES)
+    list(APPEND SOURCES "${PLUGIN_ROOT}${MK_FILE}")
+  endforeach()
+  string(REGEX MATCH "_FUNC = ([^\n]*)" FOO ${PLUGINMK})
+  if (NOT ${CMAKE_MATCH_1} STREQUAL "")
+    string(APPEND ROCKSDB_PLUGIN_BUILTINS "{\"${PLUGIN}\", " ${CMAKE_MATCH_1} "},")
+    string(APPEND ROCKSDB_PLUGIN_EXTERNS "int " ${CMAKE_MATCH_1} "(ROCKSDB_NAMESPACE::ObjectLibrary&, const std::string&); ")
+  endif()
+  string(REGEX MATCH "_LIBS = ([^\n]*)" FOO ${PLUGINMK})
+  if (NOT ${CMAKE_MATCH_1} STREQUAL "")
+    list(APPEND THIRDPARTY_LIBS "${CMAKE_MATCH_1}")
+  endif()
+  message("THIRDPARTY_LIBS=${THIRDPARTY_LIBS}")
+  #TODO: We need to set any compile/link-time flags and add any link libraries
+endforeach()
+
+string(TIMESTAMP TS "%Y-%m-%d %H:%M:%S" UTC)
+set(BUILD_DATE "${TS}" CACHE STRING "the time we first built rocksdb")
+
+find_package(Git)
+
+if(GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git")
+  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_SHA COMMAND "${GIT_EXECUTABLE}" rev-parse HEAD )
+  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" RESULT_VARIABLE GIT_MOD COMMAND "${GIT_EXECUTABLE}" diff-index HEAD --quiet)
+  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_DATE COMMAND "${GIT_EXECUTABLE}" log -1 --date=format:"%Y-%m-%d %T" --format="%ad")
+  execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_TAG RESULT_VARIABLE rv COMMAND "${GIT_EXECUTABLE}" symbolic-ref -q --short HEAD OUTPUT_STRIP_TRAILING_WHITESPACE)
+  if (rv AND NOT rv EQUAL 0)
+    execute_process(WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_TAG COMMAND "${GIT_EXECUTABLE}" describe --tags --exact-match OUTPUT_STRIP_TRAILING_WHITESPACE)
+  endif()
+else()
+  set(GIT_SHA 0)
+  set(GIT_MOD 1)
+endif()
+string(REGEX REPLACE "[^0-9a-fA-F]+" "" GIT_SHA "${GIT_SHA}")
+string(REGEX REPLACE "[^0-9: /-]+" "" GIT_DATE "${GIT_DATE}")
+
+set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc)
+configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY)
+
 add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES} ${BUILD_VERSION_CC})
 target_link_libraries(${ROCKSDB_STATIC_LIB} PRIVATE
   ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
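For orientation (not part of this diff): the `_FUNC` hook above means a plugin's makefile names a registration function, and the build then declares that function with the prototype shown in the generated extern. A minimal sketch of what such an entry point could look like for a hypothetical plugin `myplugin`; the function name and registration body are assumptions, only the signature is taken from the diff:

```cpp
#include <string>

#include "rocksdb/utilities/object_registry.h"

// Hypothetical entry point; the name is whatever myplugin_FUNC is set to in
// plugin/myplugin/myplugin.mk. It must match the extern the build generates:
//   int <FUNC>(ROCKSDB_NAMESPACE::ObjectLibrary&, const std::string&);
int MypluginRegisterFactories(ROCKSDB_NAMESPACE::ObjectLibrary& library,
                              const std::string& /*arg*/) {
  // A real plugin would register its factories with `library` here (e.g. a
  // custom FileSystem or merge operator) and return the number registered.
  (void)library;
  return 0;
}
```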
@@ -1098,6 +1123,12 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPATIBILITY SameMajorVersion
   )
 
+  configure_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    @ONLY
+  )
+
   install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
 
   install(DIRECTORY "${PROJECT_SOURCE_DIR}/cmake/modules" COMPONENT devel DESTINATION ${package_config_destination})
@@ -1136,6 +1167,13 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPONENT devel
     DESTINATION ${package_config_destination}
   )
+
+  install(
+    FILES
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    COMPONENT devel
+    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
+  )
 endif()
 
 option(WITH_ALL_TESTS "Build all test, rather than a small subset" ON)
@@ -1157,8 +1195,8 @@ if(WITH_TESTS)
   list(APPEND TESTS
     cache/cache_reservation_manager_test.cc
     cache/cache_test.cc
+    cache/compressed_secondary_cache_test.cc
     cache/lru_cache_test.cc
-    cache/lru_secondary_cache_test.cc
     db/blob/blob_counting_iterator_test.cc
     db/blob/blob_file_addition_test.cc
     db/blob/blob_file_builder_test.cc
@@ -1315,6 +1353,7 @@ if(WITH_TESTS)
     util/thread_list_test.cc
     util/thread_local_test.cc
     util/work_queue_test.cc
+    utilities/agg_merge/agg_merge_test.cc
     utilities/backup/backup_engine_test.cc
     utilities/blob_db/blob_db_test.cc
     utilities/cassandra/cassandra_functional_test.cc
@@ -1346,14 +1385,11 @@ if(WITH_TESTS)
   )
 endif()
 
-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
-  list(APPEND TESTS third-party/folly/folly/synchronization/test/DistributedMutexTest.cpp)
-endif()
-
 set(TESTUTIL_SOURCE
   db/db_test_util.cc
   monitoring/thread_status_updater_debug.cc
   table/mock_table.cc
+  utilities/agg_merge/test_agg_merge.cc
   utilities/cassandra/test_utils.cc
 )
 enable_testing()
43 HISTORY.md

@@ -1,6 +1,34 @@
 # Rocksdb Change Log
 ## Unreleased
 ### Bug Fixes
+* Fixed a bug where manual flush would block forever even though flush options had wait=false.
+* Fixed a bug where RocksDB could corrupt DBs with `avoid_flush_during_recovery == true` by removing valid WALs, leading to `Status::Corruption` with a message like "SST file is ahead of WALs" when attempting to reopen.
+* Fixed a bug in the async_io path where an incorrect length of data is read by FilePrefetchBuffer if data is consumed from two populated buffers and a request for more data is sent.
+* Fixed a CompactionFilter bug. Compaction filter used to use `Delete` to remove keys, even if the keys should be removed with `SingleDelete`. Mixing `Delete` and `SingleDelete` may cause undefined behavior.
+* Fixed a bug which might cause a process crash when an I/O error happens while reading an index block in MultiGet().
+
+### New Features
+* DB::GetLiveFilesStorageInfo is ready for production use.
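A sketch of calling it, assuming the `DB::GetLiveFilesStorageInfo(const LiveFilesStorageInfoOptions&, std::vector<LiveFileStorageInfo>*)` overload in `include/rocksdb/db.h` (type and option names are assumptions, not taken from this diff):

```cpp
#include <vector>

#include "rocksdb/db.h"

// Sketch: collect storage metadata for all live files of an open DB.
ROCKSDB_NAMESPACE::Status ListLiveFiles(ROCKSDB_NAMESPACE::DB* db) {
  ROCKSDB_NAMESPACE::LiveFilesStorageInfoOptions opts;
  std::vector<ROCKSDB_NAMESPACE::LiveFileStorageInfo> files;
  return db->GetLiveFilesStorageInfo(opts, &files);
}
```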
+* Add new stats: PREFETCHED_BYTES_DISCARDED, which records the number of prefetched bytes discarded by RocksDB FilePrefetchBuffer on destruction, and POLL_WAIT_MICROS, which records the wait time for FS::Poll API completion.
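A sketch of reading the new stats, assuming `PREFETCHED_BYTES_DISCARDED` is exposed as a ticker and `POLL_WAIT_MICROS` as a histogram in `rocksdb/statistics.h`:

```cpp
#include <memory>

#include "rocksdb/statistics.h"

void ReportPrefetchStats(
    const std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>& stats) {
  // Bytes prefetched but thrown away when a FilePrefetchBuffer was destroyed.
  uint64_t discarded =
      stats->getTickerCount(ROCKSDB_NAMESPACE::PREFETCHED_BYTES_DISCARDED);
  // Distribution of time spent waiting on FS::Poll completion.
  ROCKSDB_NAMESPACE::HistogramData poll_wait;
  stats->histogramData(ROCKSDB_NAMESPACE::POLL_WAIT_MICROS, &poll_wait);
  (void)discarded;
}
```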
+
+### Public API changes
+* Add rollback_deletion_type_callback to TransactionDBOptions so that write-prepared transactions know whether to issue a Delete or SingleDelete to cancel a previous key written during the prior prepare phase. The PR aims to prevent mixing SingleDeletes and Deletes for the same key, which can lead to undefined behavior for write-prepared transactions.
+* EXPERIMENTAL: Add new API AbortIO in file_system to abort the read requests submitted asynchronously.
+* CompactionFilter::Decision has a new value: kRemoveWithSingleDelete. If CompactionFilter returns this decision, then CompactionIterator will use `SingleDelete` to mark a key as removed.
+* Renamed CompactionFilter::Decision::kRemoveWithSingleDelete to kPurge, since the latter sounds more general and hides the implementation details of how the compaction iterator handles keys.
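To illustrate the decision above, a minimal sketch (not code from this PR) of a filter that purges keys by prefix via `CompactionFilter::FilterV2`; the class name and prefix are hypothetical:

```cpp
#include <string>

#include "rocksdb/compaction_filter.h"

using ROCKSDB_NAMESPACE::CompactionFilter;
using ROCKSDB_NAMESPACE::Slice;

// Hypothetical filter: remove keys with a "tmp_" prefix. kPurge asks the
// compaction iterator to remove the key in a SingleDelete-compatible way
// rather than issuing a plain Delete.
class PurgePrefixFilter : public CompactionFilter {
 public:
  Decision FilterV2(int /*level*/, const Slice& key, ValueType /*type*/,
                    const Slice& /*existing_value*/,
                    std::string* /*new_value*/,
                    std::string* /*skip_until*/) const override {
    return key.starts_with("tmp_") ? Decision::kPurge : Decision::kKeep;
  }
  const char* Name() const override { return "PurgePrefixFilter"; }
};
```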
+* Added the ability to specify functions for Prepare and Validate to OptionsTypeInfo. Added methods to OptionTypeInfo to set the functions via an API. These methods are intended for RocksDB plugin developers for configuration management.
+* Added a new immutable db option, enforce_single_del_contracts. If set to false (default is true), compaction will NOT fail due to a single delete followed by a delete for the same key. The purpose of this temporary option is to help existing use cases migrate.
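A sketch of the migration escape hatch described above; the option name is from the note, everything else is assumed:

```cpp
#include "rocksdb/options.h"

ROCKSDB_NAMESPACE::Options MakeMigrationOptions() {
  ROCKSDB_NAMESPACE::Options options;
  // true (the default) makes compaction fail on a SingleDelete followed by a
  // Delete for the same key; false is a temporary escape hatch for migration.
  options.enforce_single_del_contracts = false;
  return options;
}
```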
+
+### Bug Fixes
+* RocksDB calls the FileSystem::Poll API during FilePrefetchBuffer destruction, which impacts performance as it waits for completion of read requests that are no longer needed. Calling FileSystem::AbortIO to abort those requests instead fixes that performance issue.
+* Fixed unnecessary block cache contention when queries within a MultiGet batch and across parallel batches access the same data block, which previously could cause severely degraded performance in this unusual case. (In more typical MultiGet cases, this fix is expected to yield a small or negligible performance improvement.)
+
+### Behavior changes
+* Enforce the existing contract of SingleDelete so that SingleDelete cannot be mixed with Delete, because doing so leads to undefined behavior. Fix a number of unit tests that violate the contract but happen to pass.
+* ldb `--try_load_options` defaults to true if `--db` is specified and not creating a new DB; the user can still explicitly disable that by `--try_load_options=false` (or explicitly enable it by `--try_load_options`).
+
+## 7.2.0 (04/15/2022)
+### Bug Fixes
 * Fixed bug which caused rocksdb failure in the situation when rocksdb was accessible using UNC path
 * Fixed a race condition when 2PC is disabled and WAL tracking in the MANIFEST is enabled. The race condition is between two background flush threads trying to install flush results, causing a WAL deletion not tracked in the MANIFEST. A future DB open may fail.
 * Fixed a heap use-after-free race with DropColumnFamily.
@@ -9,15 +37,30 @@
 * Fixed a bug affecting `track_and_verify_wals_in_manifest`. Without the fix, application may see "open error: Corruption: Missing WAL with log number" while trying to open the db. The corruption is a false alarm but prevents DB open (#9766).
 * Fix segfault in FilePrefetchBuffer with async_io as it doesn't wait for pending jobs to complete on destruction.
 * Fix ERROR_HANDLER_AUTORESUME_RETRY_COUNT stat whose value was set wrong in portal.h
+* Fixed a bug for non-TransactionDB with avoid_flush_during_recovery = true and TransactionDB where, in case of crash, min_log_number_to_keep may not change on recovery, and persisting a new MANIFEST with advanced log_numbers for some column families results in a "column family inconsistency" error on second recovery. As a solution, the WALs whose numbers are larger than the corrupted WAL and smaller than the new WAL will be moved to the archive folder.
+* Fixed a bug in RocksDB DB::Open() which may create and write to two new MANIFEST files even before recovery succeeds. Now writes to MANIFEST are persisted only after recovery is successful.
+
 ### New Features
 * For db_bench when --seed=0 or --seed is not set then it uses the current time as the seed value. Previously it used the value 1000.
 * For db_bench when --benchmark lists multiple tests and each test uses a seed for a RNG then the seeds across tests will no longer be repeated.
 * Added an option to dynamically charge an updating estimated memory usage of block-based table reader to block cache if block cache is available. To enable this feature, set `BlockBasedTableOptions::reserve_table_reader_memory = true`.
 * Add new stat ASYNC_READ_BYTES that calculates number of bytes read during async read call and users can check if async code path is being called by RocksDB internal automatic prefetching for sequential reads.
+* Enable async prefetching if ReadOptions.readahead_size is set along with ReadOptions.async_io in FilePrefetchBuffer.
+* Add event listener support on remote compaction compactor side.
+* Added a dedicated integer DB property `rocksdb.live-blob-file-garbage-size` that exposes the total amount of garbage in the blob files in the current version.
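A sketch of querying the new property; integer DB properties are read with `DB::GetIntProperty` and a plain string name:

```cpp
#include "rocksdb/db.h"

// Sketch: read the total blob-file garbage (in bytes) for the current version.
uint64_t LiveBlobFileGarbageSize(ROCKSDB_NAMESPACE::DB* db) {
  uint64_t garbage_bytes = 0;
  db->GetIntProperty("rocksdb.live-blob-file-garbage-size", &garbage_bytes);
  return garbage_bytes;
}
```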
+* RocksDB does internal auto prefetching if it notices sequential reads. It starts with readahead size `initial_auto_readahead_size` which now can be configured through BlockBasedTableOptions.
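A sketch combining this option with `reserve_table_reader_memory` from the New Features list above; the readahead value is an arbitrary example, not a recommended setting:

```cpp
#include "rocksdb/options.h"
#include "rocksdb/table.h"

ROCKSDB_NAMESPACE::Options MakeTunedOptions() {
  ROCKSDB_NAMESPACE::BlockBasedTableOptions table_options;
  // Charge the table reader's estimated memory usage to the block cache.
  table_options.reserve_table_reader_memory = true;
  // Starting window for the internal auto-readahead described above.
  table_options.initial_auto_readahead_size = 16 * 1024;  // assumed example
  ROCKSDB_NAMESPACE::Options options;
  options.table_factory.reset(
      ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(table_options));
  return options;
}
```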
+* Add a merge operator that allows users to register specific aggregation functions so that they can do aggregation using different aggregation types for different keys. See comments in include/rocksdb/utilities/agg_merge.h for actual usage. The feature is experimental, the format is subject to change, and we won't provide a migration tool.
+* Meta-internal / Experimental: Improve CPU performance by replacing many uses of std::unordered_map with folly::F14FastMap when RocksDB is compiled together with Folly.
+* Experimental: Add CompressedSecondaryCache, a concrete implementation of rocksdb::SecondaryCache, that integrates with compression libraries (e.g. LZ4) to hold compressed blocks.
+
 ### Behavior changes
 * Disallow usage of commit-time-write-batch for write-prepared/write-unprepared transactions if TransactionOptions::use_only_the_last_commit_time_batch_for_recovery is false to prevent two (or more) uncommitted versions of the same key in the database. Otherwise, bottommost compaction may violate the internal key uniqueness invariant of SSTs if the sequence numbers of both internal keys are zeroed out (#9794).
+* Make DB::GetUpdatesSince() return NotSupported early for write-prepared/write-unprepared transactions, as the API contract indicates.
+
+### Public API changes
+* Exposed APIs to examine results of block cache stats collections in a structured way. In particular, users of `GetMapProperty()` with property `kBlockCacheEntryStats` can now use the functions in `BlockCacheEntryStatsMapKeys` to find stats in the map.
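A sketch of the structured retrieval described above; it sticks to `GetMapProperty` and generic iteration rather than naming specific `BlockCacheEntryStatsMapKeys` helpers:

```cpp
#include <cstdio>
#include <map>
#include <string>

#include "rocksdb/db.h"

// Sketch: dump the block cache entry stats map. Real callers could look up
// specific keys via the BlockCacheEntryStatsMapKeys helpers instead.
void DumpBlockCacheEntryStats(ROCKSDB_NAMESPACE::DB* db) {
  std::map<std::string, std::string> stats;
  if (db->GetMapProperty(
          ROCKSDB_NAMESPACE::DB::Properties::kBlockCacheEntryStats, &stats)) {
    for (const auto& kv : stats) {
      std::printf("%s = %s\n", kv.first.c_str(), kv.second.c_str());
    }
  }
}
```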
+* Add `fail_if_not_bottommost_level` to IngestExternalFileOptions so that ingestion will fail if the file(s) cannot be ingested to the bottommost level.
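A sketch of the new flag in use, assuming the default-column-family overload of `IngestExternalFile`:

```cpp
#include <string>
#include <vector>

#include "rocksdb/db.h"

// Sketch: ingest a file, failing unless it can land in the bottommost level.
ROCKSDB_NAMESPACE::Status IngestToBottom(ROCKSDB_NAMESPACE::DB* db,
                                         const std::string& file) {
  ROCKSDB_NAMESPACE::IngestExternalFileOptions ifo;
  ifo.fail_if_not_bottommost_level = true;  // new flag from this release
  return db->IngestExternalFile({file}, ifo);
}
```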
+* Add output parameter `is_in_sec_cache` to `SecondaryCache::Lookup()`. It is to indicate whether the handle is possibly erased from the secondary cache after the Lookup.
+
 ## 7.1.0 (03/23/2022)
 ### New Features
INSTALL.md

@@ -180,8 +180,7 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like this:
 * **iOS**:
   * Run: `TARGET_OS=IOS make static_lib`. When building the project which uses rocksdb iOS library, make sure to define two important pre-processing macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
 
-* **Windows**:
-  * For building with MS Visual Studio 13 you will need Update 4 installed.
+* **Windows** (Visual Studio 2017 and up):
   * Read and follow the instructions at CMakeLists.txt
   * Or install via [vcpkg](https://github.com/microsoft/vcpkg)
     * run `vcpkg install rocksdb:x64-windows`
|
217
Makefile
217
Makefile
@ -8,7 +8,7 @@
|
|||||||
|
|
||||||
BASH_EXISTS := $(shell which bash)
|
BASH_EXISTS := $(shell which bash)
|
||||||
SHELL := $(shell which bash)
|
SHELL := $(shell which bash)
|
||||||
include python.mk
|
include common.mk
|
||||||
|
|
||||||
CLEAN_FILES = # deliberately empty, so we can append below.
|
CLEAN_FILES = # deliberately empty, so we can append below.
|
||||||
CFLAGS += ${EXTRA_CFLAGS}
|
CFLAGS += ${EXTRA_CFLAGS}
|
||||||
@ -232,14 +232,20 @@ include make_config.mk
|
|||||||
|
|
||||||
ROCKSDB_PLUGIN_MKS = $(foreach plugin, $(ROCKSDB_PLUGINS), plugin/$(plugin)/*.mk)
|
ROCKSDB_PLUGIN_MKS = $(foreach plugin, $(ROCKSDB_PLUGINS), plugin/$(plugin)/*.mk)
|
||||||
include $(ROCKSDB_PLUGIN_MKS)
|
include $(ROCKSDB_PLUGIN_MKS)
|
||||||
ROCKSDB_PLUGIN_SOURCES = $(foreach plugin, $(ROCKSDB_PLUGINS), $(foreach source, $($(plugin)_SOURCES), plugin/$(plugin)/$(source)))
|
ROCKSDB_PLUGIN_PROTO =ROCKSDB_NAMESPACE::ObjectLibrary\&, const std::string\&
|
||||||
ROCKSDB_PLUGIN_HEADERS = $(foreach plugin, $(ROCKSDB_PLUGINS), $(foreach header, $($(plugin)_HEADERS), plugin/$(plugin)/$(header)))
|
ROCKSDB_PLUGIN_SOURCES = $(foreach p, $(ROCKSDB_PLUGINS), $(foreach source, $($(p)_SOURCES), plugin/$(p)/$(source)))
|
||||||
|
ROCKSDB_PLUGIN_HEADERS = $(foreach p, $(ROCKSDB_PLUGINS), $(foreach header, $($(p)_HEADERS), plugin/$(p)/$(header)))
|
||||||
|
ROCKSDB_PLUGIN_LIBS = $(foreach p, $(ROCKSDB_PLUGINS), $(foreach lib, $($(p)_LIBS), -l$(lib)))
|
||||||
|
ROCKSDB_PLUGIN_W_FUNCS = $(foreach p, $(ROCKSDB_PLUGINS), $(if $($(p)_FUNC), $(p)))
|
||||||
|
ROCKSDB_PLUGIN_EXTERNS = $(foreach p, $(ROCKSDB_PLUGIN_W_FUNCS), int $($(p)_FUNC)($(ROCKSDB_PLUGIN_PROTO));)
|
||||||
|
ROCKSDB_PLUGIN_BUILTINS = $(foreach p, $(ROCKSDB_PLUGIN_W_FUNCS), {\"$(p)\"\, $($(p)_FUNC)}\,)
|
||||||
|
ROCKSDB_PLUGIN_LDFLAGS = $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_LDFLAGS))
|
||||||
ROCKSDB_PLUGIN_PKGCONFIG_REQUIRES = $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_PKGCONFIG_REQUIRES))
|
ROCKSDB_PLUGIN_PKGCONFIG_REQUIRES = $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_PKGCONFIG_REQUIRES))
|
||||||
|
|
||||||
CXXFLAGS += $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_CXXFLAGS))
|
CXXFLAGS += $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_CXXFLAGS))
|
||||||
|
PLATFORM_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
||||||
|
|
||||||
# Patch up the link flags for JNI from the plugins
|
# Patch up the link flags for JNI from the plugins
|
||||||
ROCKSDB_PLUGIN_LDFLAGS = $(foreach plugin, $(ROCKSDB_PLUGINS), $($(plugin)_LDFLAGS))
|
|
||||||
PLATFORM_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
|
||||||
JAVA_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
JAVA_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
||||||
JAVA_STATIC_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
JAVA_STATIC_LDFLAGS += $(ROCKSDB_PLUGIN_LDFLAGS)
|
||||||
|
|
||||||
@ -282,7 +288,7 @@ missing_make_config_paths := $(shell \
|
|||||||
grep "\./\S*\|/\S*" -o $(CURDIR)/make_config.mk | \
|
grep "\./\S*\|/\S*" -o $(CURDIR)/make_config.mk | \
|
||||||
while read path; \
|
while read path; \
|
||||||
do [ -e $$path ] || echo $$path; \
|
do [ -e $$path ] || echo $$path; \
|
||||||
done | sort | uniq)
|
done | sort | uniq | grep -v "/DOES/NOT/EXIST")
|
||||||
|
|
||||||
$(foreach path, $(missing_make_config_paths), \
|
$(foreach path, $(missing_make_config_paths), \
|
||||||
$(warning Warning: $(path) does not exist))
|
$(warning Warning: $(path) does not exist))
|
||||||
@ -334,6 +340,8 @@ endif
|
|||||||
# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
|
# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
|
||||||
ifdef COMPILE_WITH_ASAN
|
ifdef COMPILE_WITH_ASAN
|
||||||
DISABLE_JEMALLOC=1
|
DISABLE_JEMALLOC=1
|
||||||
|
ASAN_OPTIONS?=detect_stack_use_after_return=1
|
||||||
|
export ASAN_OPTIONS
|
||||||
EXEC_LDFLAGS += -fsanitize=address
|
EXEC_LDFLAGS += -fsanitize=address
|
||||||
PLATFORM_CCFLAGS += -fsanitize=address
|
PLATFORM_CCFLAGS += -fsanitize=address
|
||||||
PLATFORM_CXXFLAGS += -fsanitize=address
|
PLATFORM_CXXFLAGS += -fsanitize=address
|
||||||
@ -394,6 +402,10 @@ ifndef DISABLE_JEMALLOC
|
|||||||
ifdef JEMALLOC
|
ifdef JEMALLOC
|
||||||
PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
|
PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
|
||||||
PLATFORM_CCFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
|
PLATFORM_CCFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
|
||||||
|
ifeq ($(USE_FOLLY),1)
|
||||||
|
PLATFORM_CXXFLAGS += -DUSE_JEMALLOC
|
||||||
|
PLATFORM_CCFLAGS += -DUSE_JEMALLOC
|
||||||
|
endif
|
||||||
endif
|
endif
|
||||||
ifdef WITH_JEMALLOC_FLAG
|
ifdef WITH_JEMALLOC_FLAG
|
||||||
PLATFORM_LDFLAGS += -ljemalloc
|
PLATFORM_LDFLAGS += -ljemalloc
|
||||||
@ -404,8 +416,8 @@ ifndef DISABLE_JEMALLOC
|
|||||||
PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE)
|
PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifndef USE_FOLLY_DISTRIBUTED_MUTEX
|
ifndef USE_FOLLY
|
||||||
USE_FOLLY_DISTRIBUTED_MUTEX=0
|
USE_FOLLY=0
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifndef GTEST_THROW_ON_FAILURE
|
ifndef GTEST_THROW_ON_FAILURE
|
||||||
@ -425,8 +437,12 @@ else
|
|||||||
PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)
|
PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
|
# This provides a Makefile simulation of a Meta-internal folly integration.
|
||||||
FOLLY_DIR = ./third-party/folly
|
# It is not validated for general use.
|
||||||
|
ifeq ($(USE_FOLLY),1)
|
||||||
|
ifeq (,$(FOLLY_DIR))
|
||||||
|
FOLLY_DIR = ./third-party/folly
|
||||||
|
endif
|
||||||
# AIX: pre-defined system headers are surrounded by an extern "C" block
|
# AIX: pre-defined system headers are surrounded by an extern "C" block
|
||||||
ifeq ($(PLATFORM), OS_AIX)
|
ifeq ($(PLATFORM), OS_AIX)
|
||||||
PLATFORM_CCFLAGS += -I$(FOLLY_DIR)
|
PLATFORM_CCFLAGS += -I$(FOLLY_DIR)
|
||||||
@ -435,6 +451,8 @@ ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
|
|||||||
PLATFORM_CCFLAGS += -isystem $(FOLLY_DIR)
|
PLATFORM_CCFLAGS += -isystem $(FOLLY_DIR)
|
||||||
PLATFORM_CXXFLAGS += -isystem $(FOLLY_DIR)
|
PLATFORM_CXXFLAGS += -isystem $(FOLLY_DIR)
|
||||||
endif
|
endif
|
||||||
|
PLATFORM_CCFLAGS += -DUSE_FOLLY -DFOLLY_NO_CONFIG
|
||||||
|
PLATFORM_CXXFLAGS += -DUSE_FOLLY -DFOLLY_NO_CONFIG
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifdef TEST_CACHE_LINE_SIZE
|
ifdef TEST_CACHE_LINE_SIZE
|
||||||
@ -521,7 +539,7 @@ LIB_OBJECTS += $(patsubst %.c, $(OBJ_DIR)/%.o, $(LIB_SOURCES_C))
|
|||||||
LIB_OBJECTS += $(patsubst %.S, $(OBJ_DIR)/%.o, $(LIB_SOURCES_ASM))
|
LIB_OBJECTS += $(patsubst %.S, $(OBJ_DIR)/%.o, $(LIB_SOURCES_ASM))
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
|
ifeq ($(USE_FOLLY),1)
|
||||||
LIB_OBJECTS += $(patsubst %.cpp, $(OBJ_DIR)/%.o, $(FOLLY_SOURCES))
|
LIB_OBJECTS += $(patsubst %.cpp, $(OBJ_DIR)/%.o, $(FOLLY_SOURCES))
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@ -556,11 +574,6 @@ ALL_SOURCES += $(ROCKSDB_PLUGIN_SOURCES)
|
|||||||
TESTS = $(patsubst %.cc, %, $(notdir $(TEST_MAIN_SOURCES)))
|
TESTS = $(patsubst %.cc, %, $(notdir $(TEST_MAIN_SOURCES)))
|
||||||
TESTS += $(patsubst %.c, %, $(notdir $(TEST_MAIN_SOURCES_C)))
|
TESTS += $(patsubst %.c, %, $(notdir $(TEST_MAIN_SOURCES_C)))
|
||||||
|
|
||||||
ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
|
|
||||||
TESTS += folly_synchronization_distributed_mutex_test
|
|
||||||
ALL_SOURCES += third-party/folly/folly/synchronization/test/DistributedMutexTest.cc
|
|
||||||
endif
|
|
||||||
|
|
||||||
# `make check-headers` to very that each header file includes its own
|
# `make check-headers` to very that each header file includes its own
|
||||||
# dependencies
|
# dependencies
|
||||||
ifneq ($(filter check-headers, $(MAKECMDGOALS)),)
|
ifneq ($(filter check-headers, $(MAKECMDGOALS)),)
|
||||||
@ -585,9 +598,6 @@ am__v_CCH_1 =
|
|||||||
check-headers: $(HEADER_OK_FILES)
|
check-headers: $(HEADER_OK_FILES)
|
||||||
|
|
||||||
# options_settable_test doesn't pass with UBSAN as we use hack in the test
|
# options_settable_test doesn't pass with UBSAN as we use hack in the test
|
||||||
ifdef COMPILE_WITH_UBSAN
|
|
||||||
TESTS := $(shell echo $(TESTS) | sed 's/\boptions_settable_test\b//g')
|
|
||||||
endif
|
|
||||||
ifdef ASSERT_STATUS_CHECKED
|
ifdef ASSERT_STATUS_CHECKED
|
||||||
# TODO: finish fixing all tests to pass this check
|
# TODO: finish fixing all tests to pass this check
|
||||||
TESTS_FAILING_ASC = \
|
TESTS_FAILING_ASC = \
|
||||||
@@ -607,10 +617,13 @@ ROCKSDBTESTS_SUBSET ?= $(TESTS)
 # env_test - suspicious use of test::TmpDir
 # deletefile_test - serial because it generates giant temporary files in
 #   its various tests. Parallel can fill up your /dev/shm
+# db_bloom_filter_test - serial because excessive space usage by instances
+#   of DBFilterConstructionReserveMemoryTestWithParam can fill up /dev/shm
 NON_PARALLEL_TEST = \
   c_test \
   env_test \
   deletefile_test \
+  db_bloom_filter_test \

 PARALLEL_TEST = $(filter-out $(NON_PARALLEL_TEST), $(TESTS))

@@ -728,7 +741,7 @@ else
 git_mod := $(shell git diff-index HEAD --quiet 2>/dev/null; echo $$?)
 git_date := $(shell git log -1 --date=format:"%Y-%m-%d %T" --format="%ad" 2>/dev/null)
 endif
-gen_build_version = sed -e s/@GIT_SHA@/$(git_sha)/ -e s:@GIT_TAG@:"$(git_tag)": -e s/@GIT_MOD@/"$(git_mod)"/ -e s/@BUILD_DATE@/"$(build_date)"/ -e s/@GIT_DATE@/"$(git_date)"/ util/build_version.cc.in
+gen_build_version = sed -e s/@GIT_SHA@/$(git_sha)/ -e s:@GIT_TAG@:"$(git_tag)": -e s/@GIT_MOD@/"$(git_mod)"/ -e s/@BUILD_DATE@/"$(build_date)"/ -e s/@GIT_DATE@/"$(git_date)"/ -e s/@ROCKSDB_PLUGIN_BUILTINS@/'$(ROCKSDB_PLUGIN_BUILTINS)'/ -e s/@ROCKSDB_PLUGIN_EXTERNS@/"$(ROCKSDB_PLUGIN_EXTERNS)"/ util/build_version.cc.in

 # Record the version of the source that we are compiling.
 # We keep a record of the git revision in this file. It is then built
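The gen_build_version command stamps util/build_version.cc.in by plain sed substitution; the two new placeholders let statically registered plugin code be spliced into the generated file. A toy illustration of the mechanism (the input line is invented, not the real template):

    echo 'const char* sha = "@GIT_SHA@";' | sed -e s/@GIT_SHA@/abc1234/
    # -> const char* sha = "abc1234";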
@@ -782,17 +795,10 @@ $(SHARED4): $(LIB_OBJECTS)
 	$(AM_V_CCLD) $(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(LIB_OBJECTS) $(LDFLAGS) -o $@
 endif  # PLATFORM_SHARED_EXT

-.PHONY: blackbox_crash_test check clean coverage crash_test ldb_tests package \
-	release tags tags0 valgrind_check whitebox_crash_test format static_lib shared_lib all \
-	dbg rocksdbjavastatic rocksdbjava gen-pc install install-static install-shared uninstall \
-	analyze tools tools_lib check-headers \
-	blackbox_crash_test_with_atomic_flush whitebox_crash_test_with_atomic_flush \
-	blackbox_crash_test_with_txn whitebox_crash_test_with_txn \
-	blackbox_crash_test_with_best_efforts_recovery \
-	blackbox_crash_test_with_ts whitebox_crash_test_with_ts \
-	blackbox_crash_test_with_multiops_wc_txn \
-	blackbox_crash_test_with_multiops_wp_txn
-
+.PHONY: check clean coverage ldb_tests package dbg gen-pc build_size \
+	release tags tags0 valgrind_check format static_lib shared_lib all \
+	rocksdbjavastatic rocksdbjava install install-static install-shared \
+	uninstall analyze tools tools_lib check-headers checkout_folly

 all: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(TESTS)

@@ -829,18 +835,6 @@ coverage: clean
 	# Delete intermediate files
 	$(FIND) . -type f \( -name "*.gcda" -o -name "*.gcno" \) -exec rm -f {} \;

-ifneq (,$(filter check parallel_check,$(MAKECMDGOALS)),)
-# Use /dev/shm if it has the sticky bit set (otherwise, /tmp),
-# and create a randomly-named rocksdb.XXXX directory therein.
-# We'll use that directory in the "make check" rules.
-ifeq ($(TMPD),)
-TMPDIR := $(shell echo $${TMPDIR:-/tmp})
-TMPD := $(shell f=/dev/shm; test -k $$f || f=$(TMPDIR); \
-  perl -le 'use File::Temp "tempdir";' \
-    -e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)')
-endif
-endif
-
 # Run all tests in parallel, accumulating per-test logs in t/log-*.
 #
 # Each t/run-* file is a tiny generated bourne shell script that invokes one of
@@ -880,7 +874,7 @@ $(parallel_tests):
 	TEST_SCRIPT=t/run-$$TEST_BINARY-$${TEST_NAME//\//-}; \
 	printf '%s\n' \
 	  '#!/bin/sh' \
-	  "d=\$(TMPD)$$TEST_SCRIPT" \
+	  "d=\$(TEST_TMPDIR)$$TEST_SCRIPT" \
 	  'mkdir -p $$d' \
 	  "TEST_TMPDIR=\$$d $(DRIVER) ./$$TEST_BINARY --gtest_filter=$$TEST_NAME" \
 	  > $$TEST_SCRIPT; \
@@ -940,7 +934,6 @@ endif

 .PHONY: check_0
 check_0:
-	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
 	printf '%s\n' '' \
 	  'To monitor subtest <duration,pass/fail,name>,' \
 	  '  run "make watch-log" in a separate window' ''; \
@@ -951,7 +944,8 @@ check_0:
 	  | $(prioritize_long_running_tests) \
 	  | grep -E '$(tests-regexp)' \
 	  | grep -E -v '$(EXCLUDE_TESTS_REGEX)' \
-	  | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG --eta --gnu '{} $(parallel_redir)' ; \
+	  | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG --eta --gnu \
+	    --tmpdir=$(TEST_TMPDIR) '{} $(parallel_redir)' ; \
 	parallel_retcode=$$? ; \
 	awk '{ if ($$7 != 0 || $$8 != 0) { if ($$7 == "Exitval") { h = $$0; } else { if (!f) print h; print; f = 1 } } } END { if(f) exit 1; }' < LOG ; \
 	awk_retcode=$$?; \
@@ -962,7 +956,6 @@ valgrind-exclude-regexp = InlineSkipTest.ConcurrentInsert|TransactionStressTest.
 .PHONY: valgrind_check_0
 valgrind_check_0: test_log_prefix := valgrind_
 valgrind_check_0:
-	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
 	printf '%s\n' '' \
 	  'To monitor subtest <duration,pass/fail,name>,' \
 	  '  run "make watch-log" in a separate window' ''; \
@@ -974,10 +967,11 @@ valgrind_check_0:
 	  | grep -E '$(tests-regexp)' \
 	  | grep -E -v '$(valgrind-exclude-regexp)' \
 	  | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG --eta --gnu \
+	    --tmpdir=$(TEST_TMPDIR) \
 	    '(if [[ "{}" == "./"* ]] ; then $(DRIVER) {}; else {}; fi) \
 	    $(parallel_redir)' \

-CLEAN_FILES += t LOG $(TMPD)
+CLEAN_FILES += t LOG $(TEST_TMPDIR)

 # When running parallel "make check", you can monitor its progress
 # from another window.
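Since the harness now keys its scratch space off TEST_TMPDIR rather than the internal TMPD variable, a parallel run can be pointed at tmpfs directly (path and job count are illustrative, not mandated by the diff):

    TEST_TMPDIR=/dev/shm/rocksdb-check make check J=16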
@@ -1000,12 +994,12 @@ check: all
 	&& (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
 	    grep -q 'GNU Parallel'; \
 	then \
-	    $(MAKE) T="$$t" TMPD=$(TMPD) check_0; \
+	    $(MAKE) T="$$t" check_0; \
 	else \
 	    for t in $(TESTS); do \
 	      echo "===== Running $$t (`date`)"; ./$$t || exit 1; done; \
 	fi
-	rm -rf $(TMPD)
+	rm -rf $(TEST_TMPDIR)
 ifneq ($(PLATFORM), OS_AIX)
 	$(PYTHON) tools/check_all_python.py
 ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
@@ -1032,31 +1026,31 @@ ldb_tests: ldb
 include crash_test.mk

 asan_check: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) check -j32
+	COMPILE_WITH_ASAN=1 $(MAKE) check -j32
 	$(MAKE) clean

 asan_crash_test: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) crash_test
+	COMPILE_WITH_ASAN=1 $(MAKE) crash_test
 	$(MAKE) clean

 whitebox_asan_crash_test: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) whitebox_crash_test
+	COMPILE_WITH_ASAN=1 $(MAKE) whitebox_crash_test
 	$(MAKE) clean

 blackbox_asan_crash_test: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) blackbox_crash_test
+	COMPILE_WITH_ASAN=1 $(MAKE) blackbox_crash_test
 	$(MAKE) clean

 asan_crash_test_with_atomic_flush: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_atomic_flush
+	COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_atomic_flush
 	$(MAKE) clean

 asan_crash_test_with_txn: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_txn
+	COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_txn
 	$(MAKE) clean

 asan_crash_test_with_best_efforts_recovery: clean
-	ASAN_OPTIONS=detect_stack_use_after_return=1 COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_best_efforts_recovery
+	COMPILE_WITH_ASAN=1 $(MAKE) crash_test_with_best_efforts_recovery
 	$(MAKE) clean

 ubsan_check: clean
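The detect_stack_use_after_return setting is no longer baked into these targets, but ASan still honors it from the environment, so the old behavior can be recovered per run:

    ASAN_OPTIONS=detect_stack_use_after_return=1 make asan_check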
@@ -1102,11 +1096,11 @@ valgrind_test_some:
 valgrind_check: $(TESTS)
 	$(MAKE) DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" gen_parallel_tests
 	$(AM_V_GEN)if test "$(J)" != 1 \
 	&& (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
 	    grep -q 'GNU Parallel'; \
 	then \
-	    $(MAKE) TMPD=$(TMPD) \
+	    $(MAKE) \
 	    DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" valgrind_check_0; \
 	else \
 	    for t in $(filter-out %skiplist_test options_settable_test,$(TESTS)); do \
 	      $(VALGRIND_VER) $(VALGRIND_OPTS) ./$$t; \
@@ -1126,27 +1120,6 @@ valgrind_check_some: $(ROCKSDBTESTS_SUBSET)
 	  fi; \
 	done

-ifneq ($(PAR_TEST),)
-parloop:
-	ret_bad=0; \
-	for t in $(PAR_TEST); do \
-	  echo "===== Running $$t in parallel $(NUM_PAR) (`date`)";\
-	  if [ $(db_test) -eq 1 ]; then \
-	    seq $(J) | v="$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{}; export TEST_TMPDIR=$$s;' \
-	      'timeout 2m ./db_test --gtest_filter=$$v >> $$s/log-{} 2>1'; \
-	  else\
-	    seq $(J) | v="./$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{};' \
-	      'export TEST_TMPDIR=$$s; timeout 10m $$v >> $$s/log-{} 2>1'; \
-	  fi; \
-	  ret_code=$$?; \
-	  if [ $$ret_code -ne 0 ]; then \
-	    ret_bad=$$ret_code; \
-	    echo $$t exited with $$ret_code; \
-	  fi; \
-	done; \
-	exit $$ret_bad;
-endif
-
 test_names = \
 	./db_test --gtest_list_tests \
 	| perl -n \
@@ -1154,24 +1127,6 @@ test_names = \
 	  -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};' \
 	  -e 'print qq! $$p$$2!'

-parallel_check: $(TESTS)
-	$(AM_V_GEN)if test "$(J)" > 1 \
-	&& (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
-	    grep -q 'GNU Parallel'; \
-	then \
-	    echo Running in parallel $(J); \
-	else \
-	    echo "Need to have GNU Parallel and J > 1"; exit 1; \
-	fi; \
-	ret_bad=0; \
-	echo $(J);\
-	echo Test Dir: $(TMPD); \
-	seq $(J) | build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{}; rm -rf $$s; mkdir $$s'; \
-	$(MAKE) PAR_TEST="$(shell $(test_names))" TMPD=$(TMPD) \
-	  J=$(J) db_test=1 parloop; \
-	$(MAKE) PAR_TEST="$(filter-out db_test, $(TESTS))" \
-	  TMPD=$(TMPD) J=$(J) db_test=0 parloop;
-
 analyze: clean
 	USE_CLANG=1 $(MAKE) analyze_incremental

@@ -1300,11 +1255,6 @@ trace_analyzer: $(OBJ_DIR)/tools/trace_analyzer.o $(ANALYZE_OBJECTS) $(TOOLS_LIB
 block_cache_trace_analyzer: $(OBJ_DIR)/tools/block_cache_analyzer/block_cache_trace_analyzer_tool.o $(ANALYZE_OBJECTS) $(TOOLS_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

-ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
-folly_synchronization_distributed_mutex_test: $(OBJ_DIR)/third-party/folly/folly/synchronization/test/DistributedMutexTest.o $(TEST_LIBRARY) $(LIBRARY)
-	$(AM_LINK)
-endif
-
 cache_bench: $(OBJ_DIR)/cache/cache_bench.o $(CACHE_BENCH_OBJECTS) $(LIBRARY)
 	$(AM_LINK)

@@ -1371,6 +1321,9 @@ ribbon_test: $(OBJ_DIR)/util/ribbon_test.o $(TEST_LIBRARY) $(LIBRARY)
 option_change_migration_test: $(OBJ_DIR)/utilities/option_change_migration/option_change_migration_test.o $(TEST_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

+agg_merge_test: $(OBJ_DIR)/utilities/agg_merge/agg_merge_test.o $(TEST_LIBRARY) $(LIBRARY)
+	$(AM_LINK)
+
 stringappend_test: $(OBJ_DIR)/utilities/merge_operators/string_append/stringappend_test.o $(TEST_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

@@ -1842,7 +1795,7 @@ statistics_test: $(OBJ_DIR)/monitoring/statistics_test.o $(TEST_LIBRARY) $(LIBRA
 stats_history_test: $(OBJ_DIR)/monitoring/stats_history_test.o $(TEST_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

-lru_secondary_cache_test: $(OBJ_DIR)/cache/lru_secondary_cache_test.o $(TEST_LIBRARY) $(LIBRARY)
+compressed_secondary_cache_test: $(OBJ_DIR)/cache/compressed_secondary_cache_test.o $(TEST_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

 lru_cache_test: $(OBJ_DIR)/cache/lru_cache_test.o $(TEST_LIBRARY) $(LIBRARY)
@@ -2377,6 +2330,54 @@ commit_prereq:
 	false # J=$(J) build_tools/precommit_checker.py unit clang_unit release clang_release tsan asan ubsan lite unit_non_shm
 	# $(MAKE) clean && $(MAKE) jclean && $(MAKE) rocksdbjava;

+# For public CI runs, checkout folly in a way that can build with RocksDB.
+# This is mostly intended as a test-only simulation of Meta-internal folly
+# integration.
+checkout_folly:
+	if [ -e third-party/folly ]; then \
+	  cd third-party/folly && git fetch origin; \
+	else \
+	  cd third-party && git clone https://github.com/facebook/folly.git; \
+	fi
+	@# Pin to a particular version for public CI, so that PR authors don't
+	@# need to worry about folly breaking our integration. Update periodically
+	cd third-party/folly && git reset --hard 98b9b2c1124e99f50f9085ddee74ce32afffc665
+	@# A hack to remove boost dependency.
+	@# NOTE: this hack is not needed if using FBCODE compiler config
+	perl -pi -e 's/^(#include <boost)/\/\/$$1/' third-party/folly/folly/functional/Invoke.h
+
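On a fresh tree the target behaves roughly as follows (a sketch of the recipe above, not additional functionality):

    make checkout_folly
    # clones https://github.com/facebook/folly.git into third-party/folly,
    # hard-resets it to the pinned commit, and comments out the boost
    # include in folly/functional/Invoke.h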
+# ---------------------------------------------------------------------------
+# Build size testing
+# ---------------------------------------------------------------------------
+
+REPORT_BUILD_STATISTIC?=echo STATISTIC:
+
+build_size:
+	# === normal build, static ===
+	$(MAKE) clean
+	$(MAKE) static_lib
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib $$(stat --printf="%s" librocksdb.a)
+	strip librocksdb.a
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib_stripped $$(stat --printf="%s" librocksdb.a)
+	# === normal build, shared ===
+	$(MAKE) clean
+	$(MAKE) shared_lib
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib $$(stat --printf="%s" `readlink -f librocksdb.so`)
+	strip `readlink -f librocksdb.so`
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_stripped $$(stat --printf="%s" `readlink -f librocksdb.so`)
+	# === lite build, static ===
+	$(MAKE) clean
+	$(MAKE) LITE=1 static_lib
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib_lite $$(stat --printf="%s" librocksdb.a)
+	strip librocksdb.a
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib_lite_stripped $$(stat --printf="%s" librocksdb.a)
+	# === lite build, shared ===
+	$(MAKE) clean
+	$(MAKE) LITE=1 shared_lib
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_lite $$(stat --printf="%s" `readlink -f librocksdb.so`)
+	strip `readlink -f librocksdb.so`
+	$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_lite_stripped $$(stat --printf="%s" `readlink -f librocksdb.so`)
+
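REPORT_BUILD_STATISTIC defaults to a plain echo, so the sizes are merely printed; a CI system can substitute its own uploader. Here report_build_metric is a hypothetical reporter command, not something defined in this repository:

    make build_size REPORT_BUILD_STATISTIC=report_build_metric
    # each measurement then runs: report_build_metric rocksdb.build_size.<variant> <bytes>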
 # ---------------------------------------------------------------------------
 # Platform-specific compilation
 # ---------------------------------------------------------------------------
@@ -2429,7 +2430,7 @@ endif
 ifneq ($(SKIP_DEPENDS), 1)
 DEPFILES = $(patsubst %.cc, $(OBJ_DIR)/%.cc.d, $(ALL_SOURCES))
 DEPFILES+ = $(patsubst %.c, $(OBJ_DIR)/%.c.d, $(LIB_SOURCES_C) $(TEST_MAIN_SOURCES_C))
-ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
+ifeq ($(USE_FOLLY),1)
 DEPFILES +=$(patsubst %.cpp, $(OBJ_DIR)/%.cpp.d, $(FOLLY_SOURCES))
 endif
 endif
@@ -2477,7 +2478,7 @@ list_all_tests:

 # Remove the rules for which dependencies should not be generated and see if any are left.
 # If so, include the dependencies; if not, do not include the dependency files
-ROCKS_DEP_RULES=$(filter-out clean format check-format check-buck-targets check-headers check-sources jclean jtest package analyze tags rocksdbjavastatic% unity.% unity_test, $(MAKECMDGOALS))
+ROCKS_DEP_RULES=$(filter-out clean format check-format check-buck-targets check-headers check-sources jclean jtest package analyze tags rocksdbjavastatic% unity.% unity_test checkout_folly, $(MAKECMDGOALS))
 ifneq ("$(ROCKS_DEP_RULES)", "")
 -include $(DEPFILES)
 endif
@@ -4,3 +4,4 @@ This is the list of all known third-party plugins for RocksDB. If something is m
 * [HDFS](https://github.com/riversand963/rocksdb-hdfs-env): an Env used for interacting with HDFS. Migrated from main RocksDB repo
 * [ZenFS](https://github.com/westerndigitalcorporation/zenfs): a file system for zoned block devices
 * [RADOS](https://github.com/riversand963/rocksdb-rados-env): an Env used for interacting with RADOS. Migrated from RocksDB main repo.
+* [PMEM](https://github.com/pmem/pmem-rocksdb-plugin): a collection of plugins to enable Persistent Memory on RocksDB.

TARGETS (89 changes)
@@ -14,8 +14,9 @@ cpp_library_wrapper(name="rocksdb_lib", srcs=[
     "cache/cache_key.cc",
     "cache/cache_reservation_manager.cc",
     "cache/clock_cache.cc",
+    "cache/compressed_secondary_cache.cc",
+    "cache/fast_lru_cache.cc",
     "cache/lru_cache.cc",
-    "cache/lru_secondary_cache.cc",
     "cache/sharded_cache.cc",
     "db/arena_wrapped_db_iter.cc",
     "db/blob/blob_fetcher.cc",
@@ -224,6 +225,7 @@ cpp_library_wrapper(name="rocksdb_lib", srcs=[
     "trace_replay/trace_record_result.cc",
     "trace_replay/trace_replay.cc",
     "util/build_version.cc",
+    "util/cleanable.cc",
     "util/coding.cc",
     "util/compaction_job_stats_impl.cc",
     "util/comparator.cc",
@@ -245,6 +247,7 @@ cpp_library_wrapper(name="rocksdb_lib", srcs=[
     "util/thread_local.cc",
     "util/threadpool_imp.cc",
     "util/xxhash.cc",
+    "utilities/agg_merge/agg_merge.cc",
     "utilities/backup/backup_engine.cc",
     "utilities/blob_db/blob_compaction_filter.cc",
     "utilities/blob_db/blob_db.cc",
@@ -324,7 +327,7 @@ cpp_library_wrapper(name="rocksdb_lib", srcs=[
     "utilities/wal_filter.cc",
     "utilities/write_batch_with_index/write_batch_with_index.cc",
     "utilities/write_batch_with_index/write_batch_with_index_internal.cc",
-    ], deps=[], headers=None, link_whole=False, extra_test_libs=False)
+    ], deps=["//folly/container:f14_hash"], headers=None, link_whole=False, extra_test_libs=False)

 cpp_library_wrapper(name="rocksdb_whole_archive_lib", srcs=[
     "cache/cache.cc",
@@ -332,8 +335,9 @@ cpp_library_wrapper(name="rocksdb_whole_archive_lib", srcs=[
     "cache/cache_key.cc",
     "cache/cache_reservation_manager.cc",
     "cache/clock_cache.cc",
+    "cache/compressed_secondary_cache.cc",
+    "cache/fast_lru_cache.cc",
     "cache/lru_cache.cc",
-    "cache/lru_secondary_cache.cc",
     "cache/sharded_cache.cc",
     "db/arena_wrapped_db_iter.cc",
     "db/blob/blob_fetcher.cc",
@@ -542,6 +546,7 @@ cpp_library_wrapper(name="rocksdb_whole_archive_lib", srcs=[
     "trace_replay/trace_record_result.cc",
     "trace_replay/trace_replay.cc",
     "util/build_version.cc",
+    "util/cleanable.cc",
     "util/coding.cc",
     "util/compaction_job_stats_impl.cc",
     "util/comparator.cc",
@@ -563,6 +568,7 @@ cpp_library_wrapper(name="rocksdb_whole_archive_lib", srcs=[
     "util/thread_local.cc",
     "util/threadpool_imp.cc",
     "util/xxhash.cc",
+    "utilities/agg_merge/agg_merge.cc",
     "utilities/backup/backup_engine.cc",
     "utilities/blob_db/blob_compaction_filter.cc",
     "utilities/blob_db/blob_db.cc",
@@ -642,7 +648,7 @@ cpp_library_wrapper(name="rocksdb_whole_archive_lib", srcs=[
     "utilities/wal_filter.cc",
     "utilities/write_batch_with_index/write_batch_with_index.cc",
     "utilities/write_batch_with_index/write_batch_with_index_internal.cc",
-    ], deps=[], headers=None, link_whole=True, extra_test_libs=False)
+    ], deps=["//folly/container:f14_hash"], headers=None, link_whole=True, extra_test_libs=False)

 cpp_library_wrapper(name="rocksdb_test_lib", srcs=[
     "db/db_test_util.cc",
@@ -652,6 +658,7 @@ cpp_library_wrapper(name="rocksdb_test_lib", srcs=[
     "test_util/testutil.cc",
     "tools/block_cache_analyzer/block_cache_trace_analyzer.cc",
     "tools/trace_analyzer_tool.cc",
+    "utilities/agg_merge/test_agg_merge.cc",
     "utilities/cassandra/test_utils.cc",
     ], deps=[":rocksdb_lib"], headers=None, link_whole=False, extra_test_libs=True)

@@ -685,6 +692,8 @@ rocks_cpp_library_wrapper(name="rocksdb_stress_lib", srcs=[
     ], headers=None)


+cpp_binary_wrapper(name="db_stress", srcs=["db_stress_tool/db_stress.cc"], deps=[":rocksdb_stress_lib"], extra_preprocessor_flags=[], extra_bench_libs=False)
+
 cpp_binary_wrapper(name="ribbon_bench", srcs=["microbench/ribbon_bench.cc"], deps=[], extra_preprocessor_flags=[], extra_bench_libs=True)

 cpp_binary_wrapper(name="db_basic_bench", srcs=["microbench/db_basic_bench.cc"], deps=[], extra_preprocessor_flags=[], extra_bench_libs=True)
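db_stress is now emitted as its own Buck binary on top of rocksdb_stress_lib. Assuming a standard Buck setup, it could be built with a relative target from the directory containing this TARGETS file (the exact package path depends on where the file sits in the larger tree):

    buck build :db_stress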
@@ -711,9 +720,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_0", binary_to_bench_to_
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'DBPut/comp_style:2/max_data:107374182400/per_key_size:256/enable_statistics:0/wal:0/iterations:51200/threads:8': ['real_time',
 'cpu_time',
@@ -769,16 +776,12 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_1", binary_to_bench_to_
 'DBPut/comp_style:2/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'DBPut/comp_style:2/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:2/bits_per_key:20/key_len_avg:100/entry_num:1024': ['real_time',
 'cpu_time',
@@ -1018,9 +1021,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_6", binary_to_bench_to_
 'DBPut/comp_style:2/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:2/bits_per_key:10/key_len_avg:100/entry_num:1024': ['real_time',
 'cpu_time',
@@ -1059,16 +1060,12 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_7", binary_to_bench_to_
 'DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'RandomAccessFileReaderRead/enable_statistics:0/iterations:1000000': ['real_time',
 'cpu_time',
@@ -1113,9 +1110,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_8", binary_to_bench_to_
 'DBPut/comp_style:2/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:0/bits_per_key:20/key_len_avg:10/entry_num:1048576': ['real_time',
 'cpu_time',
@@ -1209,9 +1204,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_10", binary_to_bench_to
 'DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:3/bits_per_key:10/key_len_avg:10/entry_num:1024': ['real_time',
 'cpu_time',
@@ -1243,16 +1236,12 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_11", binary_to_bench_to
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']}}, slow=False, expected_runtime=2446, sl_iterations=3, regression_threshold=10)


@@ -1337,9 +1326,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_13", binary_to_bench_to
 'DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:2/bits_per_key:20/key_len_avg:100/entry_num:1048576': ['real_time',
 'cpu_time',
@@ -1380,9 +1367,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_14", binary_to_bench_to
 'DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1/iterations:51200/threads:8': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads']},
 'ribbon_bench': {'FilterBuild/filter_impl:0/bits_per_key:10/key_len_avg:10/entry_num:1024': ['real_time',
 'cpu_time',
@@ -1481,9 +1466,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_0_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -1704,9 +1687,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_1_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -1927,9 +1908,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_2_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -2150,9 +2129,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_3_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -2373,9 +2350,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_4_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -2596,9 +2571,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_5_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -2819,9 +2792,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_6_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -3042,9 +3013,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_7_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -3265,9 +3234,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_8_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -3488,9 +3455,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_9_slow", binary_to_benc
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -3711,9 +3676,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_10_slow", binary_to_ben
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -3934,9 +3897,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_11_slow", binary_to_ben
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -4157,9 +4118,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_12_slow", binary_to_ben
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -4380,9 +4339,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_13_slow", binary_to_ben
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -4603,9 +4560,7 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_14_slow", binary_to_ben
 'DBPut/comp_style:1/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:0/iterations:409600/threads:1': ['real_time',
 'put_mean',
 'cpu_time',
-'put_p99',
 'db_size',
-'put_p95',
 'threads'],
 'IteratorPrev/comp_style:1/max_data:536870912/per_key_size:256/iterations:10240': ['real_time',
 'cpu_time',
@@ -4750,6 +4705,12 @@ fancy_bench_wrapper(suite_name="rocksdb_microbench_suite_14_slow", binary_to_ben
 # Do not build the tests in opt mode, since SyncPoint and other test code
 # will not be included.

+cpp_unittest_wrapper(name="agg_merge_test",
+                     srcs=["utilities/agg_merge/agg_merge_test.cc"],
+                     deps=[":rocksdb_test_lib"],
+                     extra_compiler_flags=[])
+
+
 cpp_unittest_wrapper(name="arena_test",
                      srcs=["memory/arena_test.cc"],
                      deps=[":rocksdb_test_lib"],
@@ -4984,6 +4945,12 @@ cpp_unittest_wrapper(name="comparator_db_test",
                      extra_compiler_flags=[])


+cpp_unittest_wrapper(name="compressed_secondary_cache_test",
+                     srcs=["cache/compressed_secondary_cache_test.cc"],
+                     deps=[":rocksdb_test_lib"],
+                     extra_compiler_flags=[])
+
+
 cpp_unittest_wrapper(name="configurable_test",
                      srcs=["options/configurable_test.cc"],
                      deps=[":rocksdb_test_lib"],
@@ -5478,12 +5445,6 @@ cpp_unittest_wrapper(name="lru_cache_test",
                      extra_compiler_flags=[])


-cpp_unittest_wrapper(name="lru_secondary_cache_test",
-                     srcs=["cache/lru_secondary_cache_test.cc"],
-                     deps=[":rocksdb_test_lib"],
-                     extra_compiler_flags=[])
-
-
 cpp_unittest_wrapper(name="manual_compaction_test",
                      srcs=["db/manual_compaction_test.cc"],
                      deps=[":rocksdb_test_lib"],

buckifier/bench.json (3180 changes; diff suppressed because it is too large)

buckifier/buckify_rocksdb.py (10 changes; mode changed from normal file to executable)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
 from __future__ import absolute_import
 from __future__ import division
@@ -143,7 +144,8 @@ def generate_targets(repo_path, deps_map):
         src_mk["LIB_SOURCES"] +
         # always add range_tree, it's only excluded on ppc64, which we don't use internally
         src_mk["RANGE_TREE_SOURCES"] +
-        src_mk["TOOL_LIB_SOURCES"])
+        src_mk["TOOL_LIB_SOURCES"],
+        deps=["//folly/container:f14_hash"])
     # rocksdb_whole_archive_lib
     TARGETS.add_library(
         "rocksdb_whole_archive_lib",
@@ -151,7 +153,7 @@ def generate_targets(repo_path, deps_map):
         # always add range_tree, it's only excluded on ppc64, which we don't use internally
         src_mk["RANGE_TREE_SOURCES"] +
         src_mk["TOOL_LIB_SOURCES"],
-        deps=None,
+        deps=["//folly/container:f14_hash"],
         headers=None,
         extra_external_deps="",
         link_whole=True)
@@ -183,6 +185,10 @@ def generate_targets(repo_path, deps_map):
         src_mk.get("ANALYZER_LIB_SOURCES", [])
         + src_mk.get('STRESS_LIB_SOURCES', [])
         + ["test_util/testutil.cc"])
+    # db_stress binary
+    TARGETS.add_binary("db_stress",
+                       ["db_stress_tool/db_stress.cc"],
+                       [":rocksdb_stress_lib"])
     # bench binaries
     for src in src_mk.get("MICROBENCH_SOURCES", []):
         name = src.rsplit('/',1)[1].split('.')[0] if '/' in src else src.split('.')[0]
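With the shebang and the executable bit in place, regenerating the Buck targets no longer requires naming the interpreter; either invocation below works from the repository root (usage implied by the change rather than spelled out in it):

    ./buckifier/buckify_rocksdb.py
    python3 buckifier/buckify_rocksdb.py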

@@ -63,7 +63,13 @@ if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
   if [ "$LIB_MODE" == "shared" ]; then
     PIC_BUILD=1
   fi
-  source "$PWD/build_tools/fbcode_config_platform009.sh"
+  if [ -n "$ROCKSDB_FBCODE_BUILD_WITH_PLATFORM010" ]; then
+    source "$PWD/build_tools/fbcode_config_platform010.sh"
+  elif [ -n "$ROCKSDB_FBCODE_BUILD_WITH_PLATFORM009" ]; then
+    source "$PWD/build_tools/fbcode_config_platform009.sh"
+  else
+    source "$PWD/build_tools/fbcode_config_platform009.sh"
+  fi
 fi

 # Delete existing output, if it exists
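Toolchain selection stays opt-in through the environment, with platform009 as the fallback either way. A sketch of requesting the newer toolchain (assuming this script is sourced by the usual make-driven configuration flow):

    ROCKSDB_FBCODE_BUILD_WITH_PLATFORM010=1 make static_lib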
@@ -463,7 +469,7 @@ EOF

 if ! test $ROCKSDB_DISABLE_MEMKIND; then
   # Test whether memkind library is installed
-  $CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -lmemkind -x c++ - -o test.o 2>/dev/null <<EOF
+  $CXX $PLATFORM_CXXFLAGS $LDFLAGS -x c++ - -o test.o -lmemkind 2>/dev/null <<EOF
 #include <memkind.h>
 int main() {
   memkind_malloc(MEMKIND_DAX_KMEM, 1024);
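The reordering matters because GNU ld resolves symbols left to right: a library listed before the translation unit that needs it can be dropped, making the probe fail even when memkind is installed. The fixed probe is equivalent to this standalone check (flags trimmed to the essentials):

    cat <<EOF | c++ -x c++ - -o /dev/null -lmemkind
    #include <memkind.h>
    int main() { memkind_malloc(MEMKIND_DAX_KMEM, 1024); }
    EOF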
@@ -874,8 +880,8 @@ if test -n "$WITH_JEMALLOC_FLAG"; then
   echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT"
 fi
 echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT"
-if test -n "$USE_FOLLY_DISTRIBUTED_MUTEX"; then
-  echo "USE_FOLLY_DISTRIBUTED_MUTEX=$USE_FOLLY_DISTRIBUTED_MUTEX" >> "$OUTPUT"
+if test -n "$USE_FOLLY"; then
+  echo "USE_FOLLY=$USE_FOLLY" >> "$OUTPUT"
 fi
 if test -n "$PPC_LIBC_IS_GNU"; then
   echo "PPC_LIBC_IS_GNU=$PPC_LIBC_IS_GNU" >> "$OUTPUT"
|
@@ -19,3 +19,4 @@ BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/08634589372fa5f237bfd374e8c644a836
 VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/6ae525939ad02e5e676855082fbbc7828dbafeac/3.15.0/platform009/7f3b187
 LUA_BASE=/mnt/gvfs/third-party2/lua/162efd9561a3d21f6869f4814011e9cf1b3ff4dc/5.3.4/platform009/a6271c4
 BENCHMARK_BASE=/mnt/gvfs/third-party2/benchmark/30bf49ad6414325e17f3425b0edcb64239427ae3/1.6.1/platform009/7f3b187
+BOOST_BASE=/mnt/gvfs/third-party2/boost/201b7d74941e54b436dfa364a063aa6d2cd7de4c/1.69.0/platform009/8a7ffdf
22 build_tools/dependencies_platform010.sh Normal file
@@ -0,0 +1,22 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# The file is generated using update_dependencies.sh.
+GCC_BASE=/mnt/gvfs/third-party2/gcc/e40bde78650fa91b8405a857e3f10bf336633fb0/11.x/centos7-native/886b5eb
+CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/2043340983c032915adbb6f78903dc855b65aee8/12/platform010/9520e0f
+LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/c00dcc6a3e4125c7e8b248e9a79c14b78ac9e0ca/11.x/platform010/5684a5a
+GLIBC_BASE=/mnt/gvfs/third-party2/glibc/0b9c8e4b060eda62f3bc1c6127bbe1256697569b/2.34/platform010/f259413
+SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/bc9647f7912b131315827d65cb6189c21f381d05/1.1.3/platform010/76ebdda
+ZLIB_BASE=/mnt/gvfs/third-party2/zlib/a6f5f3f1d063d2d00cd02fc12f0f05fc3ab3a994/1.2.11/platform010/76ebdda
+BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/09703139cfc376bd8a82642385a0e97726b28287/1.0.6/platform010/76ebdda
+LZ4_BASE=/mnt/gvfs/third-party2/lz4/60220d6a5bf7722b9cc239a1368c596619b12060/1.9.1/platform010/76ebdda
+ZSTD_BASE=/mnt/gvfs/third-party2/zstd/50eace8143eaaea9473deae1f3283e0049e05633/1.4.x/platform010/64091f4
+GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/5d27e5919771603da06000a027b12f799e58a4f7/2.2.0/platform010/76ebdda
+JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/b62912d333ef33f9760efa6219dbe3fe6abb3b0e/master/platform010/f57cc4a
+NUMA_BASE=/mnt/gvfs/third-party2/numa/6b412770957aa3c8a87e5e0dcd8cc2f45f393bc0/2.0.11/platform010/76ebdda
+LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/52f69816e936e147664ad717eb71a1a0e9dc973a/1.4/platform010/5074a48
+TBB_BASE=/mnt/gvfs/third-party2/tbb/c9cc192099fa84c0dcd0ffeedd44a373ad6e4925/2018_U5/platform010/76ebdda
+LIBURING_BASE=/mnt/gvfs/third-party2/liburing/a98e2d137007e3ebf7f33bd6f99c2c56bdaf8488/20210212/platform010/76ebdda
+BENCHMARK_BASE=/mnt/gvfs/third-party2/benchmark/780c7a0f9cf0967961e69ad08e61cddd85d61821/trunk/platform010/76ebdda
+KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/02d9f76aaaba580611cf75e741753c800c7fdc12/fb/platform010/da39a3e
+BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/938dc3f064ef3a48c0446f5b11d788d50b3eb5ee/2.37/centos7-native/da39a3e
+VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/429a6b3203eb415f1599bd15183659153129188e/3.15.0/platform010/76ebdda
+LUA_BASE=/mnt/gvfs/third-party2/lua/363787fa5cac2a8aa20638909210443278fa138e/5.3.4/platform010/9079c97
@@ -21,38 +21,48 @@ LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
 GLIBC_INCLUDE="$GLIBC_BASE/include"
 GLIBC_LIBS=" -L $GLIBC_BASE/lib"
 
-# snappy
-SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
-if test -z $PIC_BUILD; then
-  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
-else
-  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
-fi
-CFLAGS+=" -DSNAPPY"
-
-if test -z $PIC_BUILD; then
-  # location of zlib headers and libraries
-  ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
-  ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
-  CFLAGS+=" -DZLIB"
-
-  # location of bzip headers and libraries
-  BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
-  BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
-  CFLAGS+=" -DBZIP2"
-
-  LZ4_INCLUDE=" -I $LZ4_BASE/include/"
-  LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
-  CFLAGS+=" -DLZ4"
-fi
-
-ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
-if test -z $PIC_BUILD; then
-  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
-else
-  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
-fi
-CFLAGS+=" -DZSTD -DZSTD_STATIC_LINKING_ONLY"
+# snappy
+if ! test $ROCKSDB_DISABLE_SNAPPY; then
+  SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
+  if test -z $PIC_BUILD; then
+    SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
+  else
+    SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
+  fi
+  CFLAGS+=" -DSNAPPY"
+fi
+
+if test -z $PIC_BUILD; then
+  if ! test $ROCKSDB_DISABLE_ZLIB; then
+    # location of zlib headers and libraries
+    ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
+    ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
+    CFLAGS+=" -DZLIB"
+  fi
+
+  if ! test $ROCKSDB_DISABLE_BZIP; then
+    # location of bzip headers and libraries
+    BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
+    BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
+    CFLAGS+=" -DBZIP2"
+  fi
+
+  if ! test $ROCKSDB_DISABLE_LZ4; then
+    LZ4_INCLUDE=" -I $LZ4_BASE/include/"
+    LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
+    CFLAGS+=" -DLZ4"
+  fi
+fi
+
+if ! test $ROCKSDB_DISABLE_ZSTD; then
+  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
+  if test -z $PIC_BUILD; then
+    ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
+  else
+    ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
+  fi
+  CFLAGS+=" -DZSTD -DZSTD_STATIC_LINKING_ONLY"
+fi
 
 # location of gflags headers and libraries
 GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
@@ -162,6 +172,4 @@ else
   LUA_LIB=" $LUA_PATH/lib/liblua_pic.a"
 fi
 
-USE_FOLLY_DISTRIBUTED_MUTEX=1
-
 export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
@@ -27,28 +27,38 @@ else
   MAYBE_PIC=_pic
 fi
 
-# snappy
-SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
-SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
-CFLAGS+=" -DSNAPPY"
+if ! test $ROCKSDB_DISABLE_SNAPPY; then
+  # snappy
+  SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
+  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
+  CFLAGS+=" -DSNAPPY"
+fi
 
-# location of zlib headers and libraries
-ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
-ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
-CFLAGS+=" -DZLIB"
+if ! test $ROCKSDB_DISABLE_ZLIB; then
+  # location of zlib headers and libraries
+  ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
+  ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
+  CFLAGS+=" -DZLIB"
+fi
 
-# location of bzip headers and libraries
-BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
-BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
-CFLAGS+=" -DBZIP2"
+if ! test $ROCKSDB_DISABLE_BZIP; then
+  # location of bzip headers and libraries
+  BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
+  BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
+  CFLAGS+=" -DBZIP2"
+fi
 
-LZ4_INCLUDE=" -I $LZ4_BASE/include/"
-LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
-CFLAGS+=" -DLZ4"
+if ! test $ROCKSDB_DISABLE_LZ4; then
+  LZ4_INCLUDE=" -I $LZ4_BASE/include/"
+  LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
+  CFLAGS+=" -DLZ4"
+fi
 
-ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
-ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
-CFLAGS+=" -DZSTD"
+if ! test $ROCKSDB_DISABLE_ZSTD; then
+  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
+  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
+  CFLAGS+=" -DZSTD"
+fi
 
 # location of gflags headers and libraries
 GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
@@ -58,6 +68,8 @@ CFLAGS+=" -DGFLAGS=gflags"
 BENCHMARK_INCLUDE=" -I $BENCHMARK_BASE/include/"
 BENCHMARK_LIBS=" $BENCHMARK_BASE/lib/libbenchmark${MAYBE_PIC}.a"
 
+BOOST_INCLUDE=" -I $BOOST_BASE/include/"
+
 # location of jemalloc
 JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
 JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc${MAYBE_PIC}.a"
@@ -89,7 +101,7 @@ BINUTILS="$BINUTILS_BASE/bin"
 AR="$BINUTILS/ar"
 AS="$BINUTILS/as"
 
-DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE"
+DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE $BOOST_INCLUDE"
 
 STDLIBS="-L $GCC_BASE/lib64"
 
175 build_tools/fbcode_config_platform010.sh Normal file
@@ -0,0 +1,175 @@
+#!/bin/sh
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+#
+# Set environment variables so that we can compile rocksdb using
+# fbcode settings. It uses the latest g++ and clang compilers and also
+# uses jemalloc
+# Environment variables that change the behavior of this script:
+# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included
+
+
+BASEDIR=`dirname $BASH_SOURCE`
+source "$BASEDIR/dependencies_platform010.sh"
+
+# Disallow using libraries from default locations as they might not be compatible with platform010 libraries.
+CFLAGS=" --sysroot=/DOES/NOT/EXIST"
+
+# libgcc
+LIBGCC_INCLUDE="$LIBGCC_BASE/include/c++/trunk"
+LIBGCC_LIBS=" -L $LIBGCC_BASE/lib -B$LIBGCC_BASE/lib/gcc/x86_64-facebook-linux/trunk/"
+
+# glibc
+GLIBC_INCLUDE="$GLIBC_BASE/include"
+GLIBC_LIBS=" -L $GLIBC_BASE/lib"
+GLIBC_LIBS+=" -B$GLIBC_BASE/lib"
+
+if test -z $PIC_BUILD; then
+  MAYBE_PIC=
+else
+  MAYBE_PIC=_pic
+fi
+
+if ! test $ROCKSDB_DISABLE_SNAPPY; then
+  # snappy
+  SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
+  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
+  CFLAGS+=" -DSNAPPY"
+fi
+
+if ! test $ROCKSDB_DISABLE_ZLIB; then
+  # location of zlib headers and libraries
+  ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
+  ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
+  CFLAGS+=" -DZLIB"
+fi
+
+if ! test $ROCKSDB_DISABLE_BZIP; then
+  # location of bzip headers and libraries
+  BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
+  BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
+  CFLAGS+=" -DBZIP2"
+fi
+
+if ! test $ROCKSDB_DISABLE_LZ4; then
+  LZ4_INCLUDE=" -I $LZ4_BASE/include/"
+  LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
+  CFLAGS+=" -DLZ4"
+fi
+
+if ! test $ROCKSDB_DISABLE_ZSTD; then
+  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
+  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
+  CFLAGS+=" -DZSTD"
+fi
+
+# location of gflags headers and libraries
+GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
+GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags${MAYBE_PIC}.a"
+CFLAGS+=" -DGFLAGS=gflags"
+
+BENCHMARK_INCLUDE=" -I $BENCHMARK_BASE/include/"
+BENCHMARK_LIBS=" $BENCHMARK_BASE/lib/libbenchmark${MAYBE_PIC}.a"
+
+# location of jemalloc
+JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
+JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc${MAYBE_PIC}.a"
+
+# location of numa
+NUMA_INCLUDE=" -I $NUMA_BASE/include/"
+NUMA_LIB=" $NUMA_BASE/lib/libnuma${MAYBE_PIC}.a"
+CFLAGS+=" -DNUMA"
+
+# location of libunwind
+LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind${MAYBE_PIC}.a"
+
+# location of TBB
+TBB_INCLUDE=" -isystem $TBB_BASE/include/"
+TBB_LIBS="$TBB_BASE/lib/libtbb${MAYBE_PIC}.a"
+CFLAGS+=" -DTBB"
+
+# location of LIBURING
+LIBURING_INCLUDE=" -isystem $LIBURING_BASE/include/"
+LIBURING_LIBS="$LIBURING_BASE/lib/liburing${MAYBE_PIC}.a"
+CFLAGS+=" -DLIBURING"
+
+test "$USE_SSE" || USE_SSE=1
+export USE_SSE
+test "$PORTABLE" || PORTABLE=1
+export PORTABLE
+
+BINUTILS="$BINUTILS_BASE/bin"
+AR="$BINUTILS/ar"
+AS="$BINUTILS/as"
+
+DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE"
+
+STDLIBS="-L $GCC_BASE/lib64"
+
+CLANG_BIN="$CLANG_BASE/bin"
+CLANG_LIB="$CLANG_BASE/lib"
+CLANG_SRC="$CLANG_BASE/../../src"
+
+CLANG_ANALYZER="$CLANG_BIN/clang++"
+CLANG_SCAN_BUILD="$CLANG_SRC/llvm/clang/tools/scan-build/bin/scan-build"
+
+if [ -z "$USE_CLANG" ]; then
+  # gcc
+  CC="$GCC_BASE/bin/gcc"
+  CXX="$GCC_BASE/bin/g++"
+  AR="$GCC_BASE/bin/gcc-ar"
+
+  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
+  CFLAGS+=" -I$GCC_BASE/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/install-tools/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include-fixed/"
+  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+  CFLAGS+=" -isystem $GLIBC_INCLUDE"
+  CFLAGS+=" -I$GLIBC_INCLUDE"
+  CFLAGS+=" -I$LIBGCC_BASE/include"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/x86_64-facebook-linux/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/backward"
+  CFLAGS+=" -isystem $GLIBC_INCLUDE -I$GLIBC_INCLUDE"
+  JEMALLOC=1
+else
+  # clang
+  CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
+  CC="$CLANG_BIN/clang"
+  CXX="$CLANG_BIN/clang++"
+  AR="$CLANG_BIN/llvm-ar"
+
+  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
+  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk "
+  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk/x86_64-facebook-linux "
+  CFLAGS+=" -isystem $GLIBC_INCLUDE"
+  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+  CFLAGS+=" -isystem $CLANG_INCLUDE"
+  CFLAGS+=" -Wno-expansion-to-defined "
+  CXXFLAGS="-nostdinc++"
+fi
+
+KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
+CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
+CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
+
+CFLAGS+=" $DEPS_INCLUDE"
+CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42 -DROCKSDB_IOURING_PRESENT"
+CXXFLAGS+=" $CFLAGS"
+
+EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
+EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/platform010/lib/ld.so"
+EXEC_LDFLAGS+=" $LIBUNWIND"
+EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/platform010/lib"
+EXEC_LDFLAGS+=" -Wl,-rpath=$GCC_BASE/lib64"
+# required by libtbb
+EXEC_LDFLAGS+=" -ldl"
+
+PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
+PLATFORM_LDFLAGS+=" -B$BINUTILS"
+
+EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
+
+VALGRIND_VER="$VALGRIND_BASE/bin/"
+
+export CC CXX AR AS CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
@@ -9,6 +9,7 @@ OUTPUT=""
 function log_header()
 {
   echo "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved." >> "$OUTPUT"
+  echo "# The file is generated using update_dependencies.sh." >> "$OUTPUT"
 }
 
 
@@ -18,7 +19,7 @@ function log_variable()
 }
 
 
-TP2_LATEST="/mnt/vol/engshare/fbcode/third-party2"
+TP2_LATEST="/data/users/$USER/fbsource/fbcode/third-party2/"
 ## $1 => lib name
 ## $2 => lib version (if not provided, will try to pick latest)
 ## $3 => platform (if not provided, will try to pick latest gcc)
@@ -50,6 +51,8 @@ function get_lib_base()
   fi
 
   result=`ls -1d $result/*/ | head -n1`
 
+  echo Finding link $result
+
   # lib_name => LIB_NAME_BASE
   local __res_var=${lib_name^^}"_BASE"
@@ -61,10 +64,10 @@ function get_lib_base()
 }
 
 ###########################################################
-#                platform007 dependencies                 #
+#                platform010 dependencies                 #
 ###########################################################
 
-OUTPUT="$BASEDIR/dependencies_platform007.sh"
+OUTPUT="$BASEDIR/dependencies_platform010.sh"
 
 rm -f "$OUTPUT"
 touch "$OUTPUT"
||||||
@ -72,40 +75,42 @@ touch "$OUTPUT"
|
|||||||
echo "Writing dependencies to $OUTPUT"
|
echo "Writing dependencies to $OUTPUT"
|
||||||
|
|
||||||
# Compilers locations
|
# Compilers locations
|
||||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/7.x/centos7-native/*/`
|
GCC_BASE=`readlink -f $TP2_LATEST/gcc/11.x/centos7-native/*/`
|
||||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos7-native/*/`
|
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/12/platform010/*/`
|
||||||
|
|
||||||
log_header
|
log_header
|
||||||
log_variable GCC_BASE
|
log_variable GCC_BASE
|
||||||
log_variable CLANG_BASE
|
log_variable CLANG_BASE
|
||||||
|
|
||||||
# Libraries locations
|
# Libraries locations
|
||||||
get_lib_base libgcc 7.x platform007
|
get_lib_base libgcc 11.x platform010
|
||||||
get_lib_base glibc 2.26 platform007
|
get_lib_base glibc 2.34 platform010
|
||||||
get_lib_base snappy LATEST platform007
|
get_lib_base snappy LATEST platform010
|
||||||
get_lib_base zlib LATEST platform007
|
get_lib_base zlib LATEST platform010
|
||||||
get_lib_base bzip2 LATEST platform007
|
get_lib_base bzip2 LATEST platform010
|
||||||
get_lib_base lz4 LATEST platform007
|
get_lib_base lz4 LATEST platform010
|
||||||
get_lib_base zstd LATEST platform007
|
get_lib_base zstd LATEST platform010
|
||||||
get_lib_base gflags LATEST platform007
|
get_lib_base gflags LATEST platform010
|
||||||
get_lib_base jemalloc LATEST platform007
|
get_lib_base jemalloc LATEST platform010
|
||||||
get_lib_base numa LATEST platform007
|
get_lib_base numa LATEST platform010
|
||||||
get_lib_base libunwind LATEST platform007
|
get_lib_base libunwind LATEST platform010
|
||||||
get_lib_base tbb LATEST platform007
|
get_lib_base tbb 2018_U5 platform010
|
||||||
get_lib_base liburing LATEST platform007
|
get_lib_base liburing LATEST platform010
|
||||||
|
get_lib_base benchmark LATEST platform010
|
||||||
|
|
||||||
get_lib_base kernel-headers fb platform007
|
get_lib_base kernel-headers fb platform010
|
||||||
get_lib_base binutils LATEST centos7-native
|
get_lib_base binutils LATEST centos7-native
|
||||||
get_lib_base valgrind LATEST platform007
|
get_lib_base valgrind LATEST platform010
|
||||||
get_lib_base lua 5.3.4 platform007
|
get_lib_base lua 5.3.4 platform010
|
||||||
|
|
||||||
git diff $OUTPUT
|
git diff $OUTPUT
|
||||||
|
|
||||||
|
|
||||||
###########################################################
|
###########################################################
|
||||||
# 5.x dependencies #
|
# platform009 dependencies #
|
||||||
###########################################################
|
###########################################################
|
||||||
|
|
||||||
OUTPUT="$BASEDIR/dependencies.sh"
|
OUTPUT="$BASEDIR/dependencies_platform009.sh"
|
||||||
|
|
||||||
rm -f "$OUTPUT"
|
rm -f "$OUTPUT"
|
||||||
touch "$OUTPUT"
|
touch "$OUTPUT"
|
||||||
@@ -113,70 +118,32 @@ touch "$OUTPUT"
 echo "Writing dependencies to $OUTPUT"
 
 # Compilers locations
-GCC_BASE=`readlink -f $TP2_LATEST/gcc/5.x/centos7-native/*/`
-CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos7-native/*/`
+GCC_BASE=`readlink -f $TP2_LATEST/gcc/9.x/centos7-native/*/`
+CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/9.0.0/platform009/*/`
 
 log_header
 log_variable GCC_BASE
 log_variable CLANG_BASE
 
 # Libraries locations
-get_lib_base libgcc 5.x gcc-5-glibc-2.23
-get_lib_base glibc 2.23 gcc-5-glibc-2.23
-get_lib_base snappy LATEST gcc-5-glibc-2.23
-get_lib_base zlib LATEST gcc-5-glibc-2.23
-get_lib_base bzip2 LATEST gcc-5-glibc-2.23
-get_lib_base lz4 LATEST gcc-5-glibc-2.23
-get_lib_base zstd LATEST gcc-5-glibc-2.23
-get_lib_base gflags LATEST gcc-5-glibc-2.23
-get_lib_base jemalloc LATEST gcc-5-glibc-2.23
-get_lib_base numa LATEST gcc-5-glibc-2.23
-get_lib_base libunwind LATEST gcc-5-glibc-2.23
-get_lib_base tbb LATEST gcc-5-glibc-2.23
+get_lib_base libgcc 9.x platform009
+get_lib_base glibc 2.30 platform009
+get_lib_base snappy LATEST platform009
+get_lib_base zlib LATEST platform009
+get_lib_base bzip2 LATEST platform009
+get_lib_base lz4 LATEST platform009
+get_lib_base zstd LATEST platform009
+get_lib_base gflags LATEST platform009
+get_lib_base jemalloc LATEST platform009
+get_lib_base numa LATEST platform009
+get_lib_base libunwind LATEST platform009
+get_lib_base tbb 2018_U5 platform009
+get_lib_base liburing LATEST platform009
+get_lib_base benchmark LATEST platform009
 
-get_lib_base kernel-headers 4.0.9-36_fbk5_2933_gd092e3f gcc-5-glibc-2.23
+get_lib_base kernel-headers fb platform009
 get_lib_base binutils LATEST centos7-native
-get_lib_base valgrind LATEST gcc-5-glibc-2.23
-get_lib_base lua 5.2.3 gcc-5-glibc-2.23
+get_lib_base valgrind LATEST platform009
+get_lib_base lua 5.3.4 platform009
 
-git diff $OUTPUT
-
-
-###########################################################
-#                  4.8.1 dependencies                     #
-###########################################################
-
-OUTPUT="$BASEDIR/dependencies_4.8.1.sh"
-
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-echo "Writing 4.8.1 dependencies to $OUTPUT"
-
-# Compilers locations
-GCC_BASE=`readlink -f $TP2_LATEST/gcc/4.8.1/centos6-native/*/`
-CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
-
-log_header
-log_variable GCC_BASE
-log_variable CLANG_BASE
-
-# Libraries locations
-get_lib_base libgcc 4.8.1 gcc-4.8.1-glibc-2.17
-get_lib_base glibc 2.17 gcc-4.8.1-glibc-2.17
-get_lib_base snappy LATEST gcc-4.8.1-glibc-2.17
-get_lib_base zlib LATEST gcc-4.8.1-glibc-2.17
-get_lib_base bzip2 LATEST gcc-4.8.1-glibc-2.17
-get_lib_base lz4 LATEST gcc-4.8.1-glibc-2.17
-get_lib_base zstd LATEST gcc-4.8.1-glibc-2.17
-get_lib_base gflags LATEST gcc-4.8.1-glibc-2.17
-get_lib_base jemalloc LATEST gcc-4.8.1-glibc-2.17
-get_lib_base numa LATEST gcc-4.8.1-glibc-2.17
-get_lib_base libunwind LATEST gcc-4.8.1-glibc-2.17
-get_lib_base tbb 4.0_update2 gcc-4.8.1-glibc-2.17
-
-get_lib_base kernel-headers LATEST gcc-4.8.1-glibc-2.17
-get_lib_base binutils LATEST centos6-native
-get_lib_base valgrind 3.8.1 gcc-4.8.1-glibc-2.17
-get_lib_base lua 5.2.3 centos6-native
-
 git diff $OUTPUT
66 cache/cache_entry_roles.cc
@@ -11,7 +11,7 @@
 
 namespace ROCKSDB_NAMESPACE {
 
-std::array<const char*, kNumCacheEntryRoles> kCacheEntryRoleToCamelString{{
+std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToCamelString{{
     "DataBlock",
     "FilterBlock",
     "FilterMetaBlock",
@@ -25,7 +25,7 @@ std::array<const char*, kNumCacheEntryRoles> kCacheEntryRoleToCamelString{{
     "Misc",
 }};
 
-std::array<const char*, kNumCacheEntryRoles> kCacheEntryRoleToHyphenString{{
+std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToHyphenString{{
     "data-block",
     "filter-block",
     "filter-meta-block",
@@ -39,16 +39,72 @@ std::array<const char*, kNumCacheEntryRoles> kCacheEntryRoleToHyphenString{{
     "misc",
 }};
 
+const std::string& GetCacheEntryRoleName(CacheEntryRole role) {
+  return kCacheEntryRoleToHyphenString[static_cast<size_t>(role)];
+}
+
+const std::string& BlockCacheEntryStatsMapKeys::CacheId() {
+  static const std::string kCacheId = "id";
+  return kCacheId;
+}
+
+const std::string& BlockCacheEntryStatsMapKeys::CacheCapacityBytes() {
+  static const std::string kCacheCapacityBytes = "capacity";
+  return kCacheCapacityBytes;
+}
+
+const std::string&
+BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds() {
+  static const std::string kLastCollectionDurationSeconds =
+      "secs_for_last_collection";
+  return kLastCollectionDurationSeconds;
+}
+
+const std::string& BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds() {
+  static const std::string kLastCollectionAgeSeconds =
+      "secs_since_last_collection";
+  return kLastCollectionAgeSeconds;
+}
+
+namespace {
+
+std::string GetPrefixedCacheEntryRoleName(const std::string& prefix,
+                                          CacheEntryRole role) {
+  const std::string& role_name = GetCacheEntryRoleName(role);
+  std::string prefixed_role_name;
+  prefixed_role_name.reserve(prefix.size() + role_name.size());
+  prefixed_role_name.append(prefix);
+  prefixed_role_name.append(role_name);
+  return prefixed_role_name;
+}
+
+}  // namespace
+
+std::string BlockCacheEntryStatsMapKeys::EntryCount(CacheEntryRole role) {
+  const static std::string kPrefix = "count.";
+  return GetPrefixedCacheEntryRoleName(kPrefix, role);
+}
+
+std::string BlockCacheEntryStatsMapKeys::UsedBytes(CacheEntryRole role) {
+  const static std::string kPrefix = "bytes.";
+  return GetPrefixedCacheEntryRoleName(kPrefix, role);
+}
+
+std::string BlockCacheEntryStatsMapKeys::UsedPercent(CacheEntryRole role) {
+  const static std::string kPrefix = "percent.";
+  return GetPrefixedCacheEntryRoleName(kPrefix, role);
+}
+
 namespace {
 
 struct Registry {
   std::mutex mutex;
-  std::unordered_map<Cache::DeleterFn, CacheEntryRole> role_map;
+  UnorderedMap<Cache::DeleterFn, CacheEntryRole> role_map;
   void Register(Cache::DeleterFn fn, CacheEntryRole role) {
     std::lock_guard<std::mutex> lock(mutex);
     role_map[fn] = role;
   }
-  std::unordered_map<Cache::DeleterFn, CacheEntryRole> Copy() {
+  UnorderedMap<Cache::DeleterFn, CacheEntryRole> Copy() {
     std::lock_guard<std::mutex> lock(mutex);
     return role_map;
   }
@@ -65,7 +121,7 @@ void RegisterCacheDeleterRole(Cache::DeleterFn fn, CacheEntryRole role) {
   GetRegistry().Register(fn, role);
 }
 
-std::unordered_map<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap() {
+UnorderedMap<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap() {
   return GetRegistry().Copy();
 }
 
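The helpers added above build flat stats-map keys by prefixing the hyphenated role name, so EntryCount(kDataBlock) yields "count.data-block". A minimal sketch of how a caller could consume them; the include path and the std::map aggregation are illustrative assumptions, not part of this diff:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

#include "rocksdb/cache.h"  // assumed home of BlockCacheEntryStatsMapKeys

using namespace ROCKSDB_NAMESPACE;

int main() {
  std::map<std::string, uint64_t> stats;
  // Composes "count." + "data-block" from kCacheEntryRoleToHyphenString.
  stats[BlockCacheEntryStatsMapKeys::EntryCount(CacheEntryRole::kDataBlock)] = 42;
  // Composes "bytes." + "filter-block".
  stats[BlockCacheEntryStatsMapKeys::UsedBytes(CacheEntryRole::kFilterBlock)] = 4096;
  for (const auto& kv : stats) {
    std::cout << kv.first << " = " << kv.second << "\n";
  }
  return 0;
}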
42 cache/cache_entry_roles.h
@@ -9,49 +9,15 @@
 #include <cstdint>
 #include <memory>
 #include <type_traits>
-#include <unordered_map>
 
 #include "rocksdb/cache.h"
+#include "util/hash_containers.h"
 
 namespace ROCKSDB_NAMESPACE {
 
-// Classifications of block cache entries, for reporting statistics
-// Adding new enum to this class requires corresponding updates to
-// kCacheEntryRoleToCamelString and kCacheEntryRoleToHyphenString
-enum class CacheEntryRole {
-  // Block-based table data block
-  kDataBlock,
-  // Block-based table filter block (full or partitioned)
-  kFilterBlock,
-  // Block-based table metadata block for partitioned filter
-  kFilterMetaBlock,
-  // Block-based table deprecated filter block (old "block-based" filter)
-  kDeprecatedFilterBlock,
-  // Block-based table index block
-  kIndexBlock,
-  // Other kinds of block-based table block
-  kOtherBlock,
-  // WriteBufferManager reservations to account for memtable usage
-  kWriteBuffer,
-  // BlockBasedTableBuilder reservations to account for
-  // compression dictionary building buffer's memory usage
-  kCompressionDictionaryBuildingBuffer,
-  // Filter reservations to account for
-  // (new) bloom and ribbon filter construction's memory usage
-  kFilterConstruction,
-  // BlockBasedTableReader reservations to account for
-  // its memory usage
-  kBlockBasedTableReader,
-  // Default bucket, for miscellaneous cache entries. Do not use for
-  // entries that could potentially add up to large usage.
-  kMisc,
-};
-constexpr uint32_t kNumCacheEntryRoles =
-    static_cast<uint32_t>(CacheEntryRole::kMisc) + 1;
-
-extern std::array<const char*, kNumCacheEntryRoles>
+extern std::array<std::string, kNumCacheEntryRoles>
     kCacheEntryRoleToCamelString;
-extern std::array<const char*, kNumCacheEntryRoles>
+extern std::array<std::string, kNumCacheEntryRoles>
     kCacheEntryRoleToHyphenString;
 
 // To associate cache entries with their role, we use a hack on the
@@ -78,7 +44,7 @@ void RegisterCacheDeleterRole(Cache::DeleterFn fn, CacheEntryRole role);
 // * This is suitable for preparing for batch operations, like with
 //   CacheEntryStatsCollector.
 // * The number of mappings should be sufficiently small (dozens).
-std::unordered_map<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap();
+UnorderedMap<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap();
 
 // ************************************************************** //
 // An automatic registration infrastructure. This enables code
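Per the comment retained above, CopyCacheDeleterRoleMap() takes the registry mutex once and returns a snapshot, which is the intended batch pattern. A hedged sketch of that usage; ClassifyWithSnapshot is a hypothetical helper written for illustration, not part of this change:

#include "cache/cache_entry_roles.h"

namespace ROCKSDB_NAMESPACE {

// Hypothetical helper: classify many deleters against one snapshot of the
// registry instead of taking the registry mutex once per entry.
CacheEntryRole ClassifyWithSnapshot(
    const UnorderedMap<Cache::DeleterFn, CacheEntryRole>& snapshot,
    Cache::DeleterFn fn) {
  auto it = snapshot.find(fn);
  return it == snapshot.end() ? CacheEntryRole::kMisc : it->second;
}

}  // namespace ROCKSDB_NAMESPACE

// Usage sketch: auto snapshot = CopyCacheDeleterRoleMap();  // one lock, one copy
//               CacheEntryRole role = ClassifyWithSnapshot(snapshot, deleter_fn);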
9 cache/cache_reservation_manager_test.cc
@@ -47,13 +47,14 @@ TEST_F(CacheReservationManagerTest, GenerateCacheKey) {
 
   // Next unique Cache key
   CacheKey ckey = CacheKey::CreateUniqueForCacheLifetime(cache.get());
-  // Back it up to the one used by CRM (using CacheKey implementation details)
-  using PairU64 = std::pair<uint64_t, uint64_t>;
+  // Get to the underlying values
+  using PairU64 = std::array<uint64_t, 2>;
   auto& ckey_pair = *reinterpret_cast<PairU64*>(&ckey);
-  ckey_pair.second--;
+  // Back it up to the one used by CRM (using CacheKey implementation details)
+  ckey_pair[1]--;
 
   // Specific key (subject to implementation details)
-  EXPECT_EQ(ckey_pair, PairU64(0, 2));
+  EXPECT_EQ(ckey_pair, PairU64({0, 2}));
 
   Cache::Handle* handle = cache->Lookup(ckey.AsSlice());
   EXPECT_NE(handle, nullptr)
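The std::pair to std::array swap above matters because the test reinterpret-casts a CacheKey to inspect its two 64-bit words: std::array<uint64_t, 2> is standard-layout with contiguous elements and no extra members, while std::pair's layout guarantees are weaker. A standalone check of those properties (illustrative, not from the diff):

#include <array>
#include <cstdint>
#include <type_traits>
#include <utility>

int main() {
  using ArrU64 = std::array<uint64_t, 2>;
  using OldPairU64 = std::pair<uint64_t, uint64_t>;
  // Two contiguous words, no padding: exactly 16 bytes and standard-layout,
  // which makes it a reasonable target for the test's reinterpret_cast.
  static_assert(sizeof(ArrU64) == 2 * sizeof(uint64_t), "array adds no padding");
  static_assert(std::is_standard_layout<ArrU64>::value, "safe cast target");
  // std::pair is typically also 16 bytes, but the standard pins down less
  // about its member layout than it effectively does for std::array.
  static_assert(sizeof(OldPairU64) >= 2 * sizeof(uint64_t), "");
  return 0;
}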
46 cache/cache_test.cc
@@ -14,7 +14,9 @@
 #include <iostream>
 #include <string>
 #include <vector>
 
 #include "cache/clock_cache.h"
+#include "cache/fast_lru_cache.h"
 #include "cache/lru_cache.h"
 #include "test_util/testharness.h"
 #include "util/coding.h"
@@ -39,6 +41,7 @@ static int DecodeValue(void* v) {
 
 const std::string kLRU = "lru";
 const std::string kClock = "clock";
+const std::string kFast = "fast";
 
 void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
 
@@ -83,6 +86,9 @@ class CacheTest : public testing::TestWithParam<std::string> {
     if (type == kClock) {
       return NewClockCache(capacity);
     }
+    if (type == kFast) {
+      return NewFastLRUCache(capacity);
+    }
     return nullptr;
   }
 
@@ -103,6 +109,10 @@ class CacheTest : public testing::TestWithParam<std::string> {
       return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                            charge_policy);
     }
+    if (type == kFast) {
+      return NewFastLRUCache(capacity, num_shard_bits, strict_capacity_limit,
+                             charge_policy);
+    }
     return nullptr;
   }
 
@@ -183,7 +193,7 @@ TEST_P(CacheTest, UsageTest) {
 
   // make sure the cache will be overloaded
   for (uint64_t i = 1; i < kCapacity; ++i) {
-    auto key = ToString(i);
+    auto key = std::to_string(i);
     ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                             dumbDeleter));
     ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
@@ -255,7 +265,7 @@ TEST_P(CacheTest, PinnedUsageTest) {
 
   // check that overloading the cache does not change the pinned usage
   for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
-    auto key = ToString(i);
+    auto key = std::to_string(i);
     ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                             dumbDeleter));
     ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
@@ -575,7 +585,7 @@ TEST_P(CacheTest, SetCapacity) {
   std::vector<Cache::Handle*> handles(10);
   // Insert 5 entries, but not releasing.
   for (size_t i = 0; i < 5; i++) {
-    std::string key = ToString(i+1);
+    std::string key = std::to_string(i + 1);
     Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
     ASSERT_TRUE(s.ok());
   }
@@ -590,7 +600,7 @@ TEST_P(CacheTest, SetCapacity) {
   // then decrease capacity to 7, final capacity should be 7
   // and usage should be 7
   for (size_t i = 5; i < 10; i++) {
-    std::string key = ToString(i+1);
+    std::string key = std::to_string(i + 1);
     Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
     ASSERT_TRUE(s.ok());
   }
@@ -621,7 +631,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
   std::vector<Cache::Handle*> handles(10);
   Status s;
   for (size_t i = 0; i < 10; i++) {
-    std::string key = ToString(i + 1);
+    std::string key = std::to_string(i + 1);
     s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
     ASSERT_OK(s);
     ASSERT_NE(nullptr, handles[i]);
@@ -645,7 +655,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
   // test3: init with flag being true.
   std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
   for (size_t i = 0; i < 5; i++) {
-    std::string key = ToString(i + 1);
+    std::string key = std::to_string(i + 1);
     s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
     ASSERT_OK(s);
     ASSERT_NE(nullptr, handles[i]);
@@ -675,14 +685,14 @@ TEST_P(CacheTest, OverCapacity) {
 
   // Insert n+1 entries, but not releasing.
   for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
+    std::string key = std::to_string(i + 1);
     Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
     ASSERT_TRUE(s.ok());
   }
 
   // Guess what's in the cache now?
   for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
+    std::string key = std::to_string(i + 1);
     auto h = cache->Lookup(key);
     ASSERT_TRUE(h != nullptr);
     if (h) cache->Release(h);
@@ -703,7 +713,7 @@ TEST_P(CacheTest, OverCapacity) {
   // This is consistent with the LRU policy since the element 0
   // was released first
   for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
+    std::string key = std::to_string(i + 1);
     auto h = cache->Lookup(key);
     if (h) {
       ASSERT_NE(i, 0U);
@@ -744,9 +754,9 @@ TEST_P(CacheTest, ApplyToAllEntriesTest) {
   std::vector<std::string> callback_state;
   const auto callback = [&](const Slice& key, void* value, size_t charge,
                             Cache::DeleterFn deleter) {
-    callback_state.push_back(ToString(DecodeKey(key)) + "," +
-                             ToString(DecodeValue(value)) + "," +
-                             ToString(charge));
+    callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
+                             std::to_string(DecodeValue(value)) + "," +
+                             std::to_string(charge));
     assert(deleter == &CacheTest::Deleter);
   };
 
@@ -755,8 +765,8 @@ TEST_P(CacheTest, ApplyToAllEntriesTest) {
 
   for (int i = 0; i < 10; ++i) {
     Insert(i, i * 2, i + 1);
-    inserted.push_back(ToString(i) + "," + ToString(i * 2) + "," +
-                       ToString(i + 1));
+    inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
+                       std::to_string(i + 1));
   }
   cache_->ApplyToAllEntries(callback, /*opts*/ {});
 
@@ -838,11 +848,13 @@ TEST_P(CacheTest, GetChargeAndDeleter) {
 std::shared_ptr<Cache> (*new_clock_cache_func)(
     size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
 INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
-                        testing::Values(kLRU, kClock));
+                        testing::Values(kLRU, kClock, kFast));
 #else
-INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
+INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
+                        testing::Values(kLRU, kFast));
 #endif  // SUPPORT_CLOCK_CACHE
-INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));
+INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest,
+                        testing::Values(kLRU, kFast));
 
 }  // namespace ROCKSDB_NAMESPACE
 
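For orientation: CacheTest is parameterized on a cache-type string, so appending kFast to testing::Values(...) reruns every TEST_P against the new fast LRU cache with no other edits. A minimal gtest sketch of the same mechanism, with illustrative names that are not from the diff:

#include <string>

#include <gtest/gtest.h>

// Each TEST_P below runs once per value given to INSTANTIATE_TEST_CASE_P,
// mirroring how CacheTest fans out over "lru", "clock", and now "fast".
class CacheTypeParamTest : public testing::TestWithParam<std::string> {};

TEST_P(CacheTypeParamTest, RunsPerCacheType) {
  const std::string& type = GetParam();
  EXPECT_FALSE(type.empty());
}

INSTANTIATE_TEST_CASE_P(Instances, CacheTypeParamTest,
                        testing::Values("lru", "clock", "fast"));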
@@ -3,7 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#include "cache/lru_secondary_cache.h"
+#include "cache/compressed_secondary_cache.h"
 
 #include <memory>
 
@@ -22,7 +22,7 @@ void DeletionCallback(const Slice& /*key*/, void* obj) {
 
 }  // namespace
 
-LRUSecondaryCache::LRUSecondaryCache(
+CompressedSecondaryCache::CompressedSecondaryCache(
     size_t capacity, int num_shard_bits, bool strict_capacity_limit,
     double high_pri_pool_ratio,
     std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
@@ -37,11 +37,13 @@ LRUSecondaryCache::LRUSecondaryCache(
                          use_adaptive_mutex, metadata_charge_policy);
 }
 
-LRUSecondaryCache::~LRUSecondaryCache() { cache_.reset(); }
+CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
 
-std::unique_ptr<SecondaryCacheResultHandle> LRUSecondaryCache::Lookup(
-    const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/) {
+std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
+    const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
+    bool& is_in_sec_cache) {
   std::unique_ptr<SecondaryCacheResultHandle> handle;
+  is_in_sec_cache = false;
   Cache::Handle* lru_handle = cache_->Lookup(key);
   if (lru_handle == nullptr) {
     return handle;
@@ -69,24 +71,25 @@ std::unique_ptr<SecondaryCacheResultHandle> LRUSecondaryCache::Lookup(
         cache_options_.memory_allocator.get());
 
     if (!uncompressed) {
-      cache_->Release(lru_handle, true);
+      cache_->Release(lru_handle, /* erase_if_last_ref */ true);
       return handle;
    }
     s = create_cb(uncompressed.get(), uncompressed_size, &value, &charge);
   }
 
   if (!s.ok()) {
-    cache_->Release(lru_handle, true);
+    cache_->Release(lru_handle, /* erase_if_last_ref */ true);
     return handle;
   }
 
-  handle.reset(new LRUSecondaryCacheResultHandle(value, charge));
-  cache_->Release(lru_handle);
+  cache_->Release(lru_handle, /* erase_if_last_ref */ true);
+  handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));
 
   return handle;
 }
 
-Status LRUSecondaryCache::Insert(const Slice& key, void* value,
-                                 const Cache::CacheItemHelper* helper) {
+Status CompressedSecondaryCache::Insert(const Slice& key, void* value,
+                                        const Cache::CacheItemHelper* helper) {
   size_t size = (*helper->size_cb)(value);
   CacheAllocationPtr ptr =
       AllocateBlock(size, cache_options_.memory_allocator.get());
@@ -125,9 +128,9 @@ Status LRUSecondaryCache::Insert(const Slice& key, void* value,
   return cache_->Insert(key, buf, size, DeletionCallback);
 }
 
-void LRUSecondaryCache::Erase(const Slice& key) { cache_->Erase(key); }
+void CompressedSecondaryCache::Erase(const Slice& key) { cache_->Erase(key); }
 
-std::string LRUSecondaryCache::GetPrintableOptions() const {
+std::string CompressedSecondaryCache::GetPrintableOptions() const {
   std::string ret;
   ret.reserve(20000);
   const int kBufferSize = 200;
@@ -142,23 +145,23 @@ std::string LRUSecondaryCache::GetPrintableOptions() const {
   return ret;
 }
 
-std::shared_ptr<SecondaryCache> NewLRUSecondaryCache(
+std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
     size_t capacity, int num_shard_bits, bool strict_capacity_limit,
     double high_pri_pool_ratio,
     std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
     CacheMetadataChargePolicy metadata_charge_policy,
     CompressionType compression_type, uint32_t compress_format_version) {
-  return std::make_shared<LRUSecondaryCache>(
+  return std::make_shared<CompressedSecondaryCache>(
       capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
       memory_allocator, use_adaptive_mutex, metadata_charge_policy,
       compression_type, compress_format_version);
 }
 
-std::shared_ptr<SecondaryCache> NewLRUSecondaryCache(
-    const LRUSecondaryCacheOptions& opts) {
+std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
+    const CompressedSecondaryCacheOptions& opts) {
   // The secondary_cache is disabled for this LRUCache instance.
   assert(opts.secondary_cache == nullptr);
-  return NewLRUSecondaryCache(
+  return NewCompressedSecondaryCache(
       opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
       opts.high_pri_pool_ratio, opts.memory_allocator, opts.use_adaptive_mutex,
       opts.metadata_charge_policy, opts.compression_type,
@@ -16,15 +16,16 @@
 
 namespace ROCKSDB_NAMESPACE {
 
-class LRUSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
+class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
  public:
-  LRUSecondaryCacheResultHandle(void* value, size_t size)
+  CompressedSecondaryCacheResultHandle(void* value, size_t size)
       : value_(value), size_(size) {}
-  virtual ~LRUSecondaryCacheResultHandle() override = default;
+  virtual ~CompressedSecondaryCacheResultHandle() override = default;
 
-  LRUSecondaryCacheResultHandle(const LRUSecondaryCacheResultHandle&) = delete;
-  LRUSecondaryCacheResultHandle& operator=(
-      const LRUSecondaryCacheResultHandle&) = delete;
+  CompressedSecondaryCacheResultHandle(
+      const CompressedSecondaryCacheResultHandle&) = delete;
+  CompressedSecondaryCacheResultHandle& operator=(
+      const CompressedSecondaryCacheResultHandle&) = delete;
 
   bool IsReady() override { return true; }
 
@@ -39,19 +40,19 @@ class LRUSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
   size_t size_;
 };
 
-// The LRUSecondaryCache is a concrete implementation of
+// The CompressedSecondaryCache is a concrete implementation of
 // rocksdb::SecondaryCache.
 //
 // Users can also cast a pointer to it and call methods on
 // it directly, especially custom methods that may be added
 // in the future. For example -
 // std::unique_ptr<rocksdb::SecondaryCache> cache =
-//   NewLRUSecondaryCache(opts);
-// static_cast<LRUSecondaryCache*>(cache.get())->Erase(key);
+//   NewCompressedSecondaryCache(opts);
+// static_cast<CompressedSecondaryCache*>(cache.get())->Erase(key);
 
-class LRUSecondaryCache : public SecondaryCache {
+class CompressedSecondaryCache : public SecondaryCache {
  public:
-  LRUSecondaryCache(
+  CompressedSecondaryCache(
       size_t capacity, int num_shard_bits, bool strict_capacity_limit,
       double high_pri_pool_ratio,
       std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
@@ -60,16 +61,16 @@ class LRUSecondaryCache : public SecondaryCache {
           kDontChargeCacheMetadata,
       CompressionType compression_type = CompressionType::kLZ4Compression,
       uint32_t compress_format_version = 2);
-  virtual ~LRUSecondaryCache() override;
+  virtual ~CompressedSecondaryCache() override;
 
-  const char* Name() const override { return "LRUSecondaryCache"; }
+  const char* Name() const override { return "CompressedSecondaryCache"; }
 
   Status Insert(const Slice& key, void* value,
                 const Cache::CacheItemHelper* helper) override;
 
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
-      const Slice& key, const Cache::CreateCallback& create_cb,
-      bool /*wait*/) override;
+      const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
+      bool& is_in_sec_cache) override;
 
   void Erase(const Slice& key) override;
 
@@ -79,7 +80,7 @@ class LRUSecondaryCache : public SecondaryCache {
 
  private:
   std::shared_ptr<Cache> cache_;
-  LRUSecondaryCacheOptions cache_options_;
+  CompressedSecondaryCacheOptions cache_options_;
 };
 
 }  // namespace ROCKSDB_NAMESPACE
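The Lookup() signature change is the only behavioral edit in this header: the wait flag stays (still unused here) and a bool& is_in_sec_cache out-parameter is added so the caller learns whether the entry is still resident in the secondary cache after the lookup. A hedged sketch of a caller against the new signature; the surrounding cache and callback objects are assumed to exist.

#include <memory>

#include "rocksdb/secondary_cache.h"

using namespace ROCKSDB_NAMESPACE;

// Sketch only: exercising the new Lookup() signature from this diff.
void* LookupBlocking(SecondaryCache& sec_cache, const Slice& key,
                     const Cache::CreateCallback& create_cb) {
  bool is_in_sec_cache = false;  // new out-parameter
  std::unique_ptr<SecondaryCacheResultHandle> h =
      sec_cache.Lookup(key, create_cb, /*wait=*/true, is_in_sec_cache);
  return h ? h->Value() : nullptr;  // h is null on a miss
}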
@@ -3,7 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#include "cache/lru_secondary_cache.h"
+#include "cache/compressed_secondary_cache.h"
 
 #include <algorithm>
 #include <cstdint>
@@ -17,10 +17,10 @@
 
 namespace ROCKSDB_NAMESPACE {
 
-class LRUSecondaryCacheTest : public testing::Test {
+class CompressedSecondaryCacheTest : public testing::Test {
  public:
-  LRUSecondaryCacheTest() : fail_create_(false) {}
-  ~LRUSecondaryCacheTest() {}
+  CompressedSecondaryCacheTest() : fail_create_(false) {}
+  ~CompressedSecondaryCacheTest() {}
 
  protected:
   class TestItem {
@@ -80,7 +80,7 @@ class LRUSecondaryCacheTest : public testing::Test {
   void SetFailCreate(bool fail) { fail_create_ = fail; }
 
   void BasicTest(bool sec_cache_is_compressed, bool use_jemalloc) {
-    LRUSecondaryCacheOptions opts;
+    CompressedSecondaryCacheOptions opts;
     opts.capacity = 2048;
     opts.num_shard_bits = 0;
     opts.metadata_charge_policy = kDontChargeCacheMetadata;
@@ -107,11 +107,13 @@ class LRUSecondaryCacheTest : public testing::Test {
         ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
       }
     }
-    std::shared_ptr<SecondaryCache> cache = NewLRUSecondaryCache(opts);
+    std::shared_ptr<SecondaryCache> sec_cache =
+        NewCompressedSecondaryCache(opts);
 
+    bool is_in_sec_cache{true};
     // Lookup a non-existent key.
     std::unique_ptr<SecondaryCacheResultHandle> handle0 =
-        cache->Lookup("k0", test_item_creator, true);
+        sec_cache->Lookup("k0", test_item_creator, true, is_in_sec_cache);
     ASSERT_EQ(handle0, nullptr);
 
     Random rnd(301);
@@ -119,51 +121,47 @@ class LRUSecondaryCacheTest : public testing::Test {
     std::string str1;
     test::CompressibleString(&rnd, 0.25, 1000, &str1);
     TestItem item1(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", &item1, &LRUSecondaryCacheTest::helper_));
+    ASSERT_OK(sec_cache->Insert("k1", &item1,
+                                &CompressedSecondaryCacheTest::helper_));
 
     std::unique_ptr<SecondaryCacheResultHandle> handle1 =
-        cache->Lookup("k1", test_item_creator, true);
+        sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
     ASSERT_NE(handle1, nullptr);
-    // delete reinterpret_cast<TestItem*>(handle1->Value());
+    ASSERT_FALSE(is_in_sec_cache);
 
     std::unique_ptr<TestItem> val1 =
         std::unique_ptr<TestItem>(static_cast<TestItem*>(handle1->Value()));
     ASSERT_NE(val1, nullptr);
     ASSERT_EQ(memcmp(val1->Buf(), item1.Buf(), item1.Size()), 0);
 
+    // Lookup the first item again.
+    std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
+        sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
+    ASSERT_EQ(handle1_1, nullptr);
+
     // Insert and Lookup the second item.
     std::string str2;
     test::CompressibleString(&rnd, 0.5, 1000, &str2);
     TestItem item2(str2.data(), str2.length());
-    ASSERT_OK(cache->Insert("k2", &item2, &LRUSecondaryCacheTest::helper_));
+    ASSERT_OK(sec_cache->Insert("k2", &item2,
+                                &CompressedSecondaryCacheTest::helper_));
     std::unique_ptr<SecondaryCacheResultHandle> handle2 =
-        cache->Lookup("k2", test_item_creator, true);
+        sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
     ASSERT_NE(handle2, nullptr);
     std::unique_ptr<TestItem> val2 =
         std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
     ASSERT_NE(val2, nullptr);
     ASSERT_EQ(memcmp(val2->Buf(), item2.Buf(), item2.Size()), 0);
 
-    // Lookup the first item again to make sure it is still in the cache.
-    std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
-        cache->Lookup("k1", test_item_creator, true);
-    ASSERT_NE(handle1_1, nullptr);
-    std::unique_ptr<TestItem> val1_1 =
-        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle1_1->Value()));
-    ASSERT_NE(val1_1, nullptr);
-    ASSERT_EQ(memcmp(val1_1->Buf(), item1.Buf(), item1.Size()), 0);
-
     std::vector<SecondaryCacheResultHandle*> handles = {handle1.get(),
                                                         handle2.get()};
-    cache->WaitAll(handles);
+    sec_cache->WaitAll(handles);
 
-    cache->Erase("k1");
-    handle1 = cache->Lookup("k1", test_item_creator, true);
-    ASSERT_EQ(handle1, nullptr);
+    sec_cache.reset();
 
-    cache.reset();
   }
 
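The rewritten BasicTest pins down a semantic change, not just a rename: after a successful Lookup the compressed secondary cache no longer retains the entry, so the immediate re-lookup of "k1" now expects a miss (the old test expected a hit), and the explicit Erase("k1") becomes unnecessary. A hedged sketch of that contract, as shown below; sec_cache and create_cb come from the surrounding test scaffolding.

#include <cassert>
#include "rocksdb/secondary_cache.h"

using namespace ROCKSDB_NAMESPACE;

// Sketch of the lookup-consumes-entry contract asserted above.
void LookupConsumesEntry(SecondaryCache* sec_cache,
                         const Cache::CreateCallback& create_cb) {
  bool in_sec = false;
  auto first = sec_cache->Lookup("k1", create_cb, true, in_sec);
  assert(first != nullptr && !in_sec);  // hit; entry no longer resident
  auto second = sec_cache->Lookup("k1", create_cb, true, in_sec);
  assert(second == nullptr);            // immediate re-lookup misses
}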
   void FailsTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
         ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
@@ -176,32 +174,28 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.capacity = 1100;
     secondary_cache_opts.num_shard_bits = 0;
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
-    std::shared_ptr<SecondaryCache> cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+    std::shared_ptr<SecondaryCache> sec_cache =
+        NewCompressedSecondaryCache(secondary_cache_opts);
 
     // Insert and Lookup the first item.
     Random rnd(301);
     std::string str1(rnd.RandomString(1000));
     TestItem item1(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", &item1, &LRUSecondaryCacheTest::helper_));
-    std::unique_ptr<SecondaryCacheResultHandle> handle1 =
-        cache->Lookup("k1", test_item_creator, true);
-    ASSERT_NE(handle1, nullptr);
-    std::unique_ptr<TestItem> val1 =
-        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle1->Value()));
-    ASSERT_NE(val1, nullptr);
-    ASSERT_EQ(memcmp(val1->Buf(), item1.Buf(), item1.Size()), 0);
+    ASSERT_OK(sec_cache->Insert("k1", &item1,
+                                &CompressedSecondaryCacheTest::helper_));
 
     // Insert and Lookup the second item.
     std::string str2(rnd.RandomString(200));
     TestItem item2(str2.data(), str2.length());
     // k1 is evicted.
-    ASSERT_OK(cache->Insert("k2", &item2, &LRUSecondaryCacheTest::helper_));
+    ASSERT_OK(sec_cache->Insert("k2", &item2,
+                                &CompressedSecondaryCacheTest::helper_));
+    bool is_in_sec_cache{false};
     std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
-        cache->Lookup("k1", test_item_creator, true);
+        sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
     ASSERT_EQ(handle1_1, nullptr);
     std::unique_ptr<SecondaryCacheResultHandle> handle2 =
-        cache->Lookup("k2", test_item_creator, true);
+        sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
     ASSERT_NE(handle2, nullptr);
     std::unique_ptr<TestItem> val2 =
         std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
@@ -211,20 +205,20 @@ class LRUSecondaryCacheTest : public testing::Test {
     // Create Fails.
     SetFailCreate(true);
     std::unique_ptr<SecondaryCacheResultHandle> handle2_1 =
-        cache->Lookup("k2", test_item_creator, true);
+        sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
     ASSERT_EQ(handle2_1, nullptr);
 
     // Save Fails.
     std::string str3 = rnd.RandomString(10);
     TestItem item3(str3.data(), str3.length());
-    ASSERT_NOK(
-        cache->Insert("k3", &item3, &LRUSecondaryCacheTest::helper_fail_));
+    ASSERT_NOK(sec_cache->Insert("k3", &item3,
+                                 &CompressedSecondaryCacheTest::helper_fail_));
 
-    cache.reset();
+    sec_cache.reset();
   }
 
   void BasicIntegrationTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
 
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
@@ -239,7 +233,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.num_shard_bits = 0;
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
     std::shared_ptr<SecondaryCache> secondary_cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+        NewCompressedSecondaryCache(secondary_cache_opts);
     LRUCacheOptions lru_cache_opts(1024, 0, false, 0.5, nullptr,
                                    kDefaultToAdaptiveMutex,
                                    kDontChargeCacheMetadata);
@@ -252,26 +246,26 @@ class LRUSecondaryCacheTest : public testing::Test {
     std::string str1 = rnd.RandomString(1010);
     std::string str1_clone{str1};
     TestItem* item1 = new TestItem(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
                             str1.length()));
 
     std::string str2 = rnd.RandomString(1020);
     TestItem* item2 = new TestItem(str2.data(), str2.length());
     // After Insert, lru cache contains k2 and secondary cache contains k1.
-    ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
                             str2.length()));
 
     std::string str3 = rnd.RandomString(1020);
     TestItem* item3 = new TestItem(str3.data(), str3.length());
     // After Insert, lru cache contains k3 and secondary cache contains k1 and
     // k2
-    ASSERT_OK(cache->Insert("k3", item3, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k3", item3, &CompressedSecondaryCacheTest::helper_,
                             str3.length()));
 
     Cache::Handle* handle;
-    handle =
-        cache->Lookup("k3", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                      Cache::Priority::LOW, true, stats.get());
+    handle = cache->Lookup("k3", &CompressedSecondaryCacheTest::helper_,
+                           test_item_creator, Cache::Priority::LOW, true,
+                           stats.get());
     ASSERT_NE(handle, nullptr);
     TestItem* val3 = static_cast<TestItem*>(cache->Value(handle));
     ASSERT_NE(val3, nullptr);
@@ -279,34 +273,35 @@ class LRUSecondaryCacheTest : public testing::Test {
     cache->Release(handle);
 
     // Lookup a non-existent key.
-    handle =
-        cache->Lookup("k0", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                      Cache::Priority::LOW, true, stats.get());
+    handle = cache->Lookup("k0", &CompressedSecondaryCacheTest::helper_,
+                           test_item_creator, Cache::Priority::LOW, true,
+                           stats.get());
     ASSERT_EQ(handle, nullptr);
 
-    // This Lookup should promote k1 and demote k3, so k2 is evicted from the
-    // secondary cache. The lru cache contains k1 and secondary cache contains
-    // k3. item1 was Free(), so it cannot be compared against the item1.
-    handle =
-        cache->Lookup("k1", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                      Cache::Priority::LOW, true, stats.get());
+    // This Lookup should promote k1 and erase k1 from the secondary cache,
+    // then k3 is demoted. So k2 and k3 are in the secondary cache.
+    handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
+                           test_item_creator, Cache::Priority::LOW, true,
+                           stats.get());
+
     ASSERT_NE(handle, nullptr);
     TestItem* val1_1 = static_cast<TestItem*>(cache->Value(handle));
     ASSERT_NE(val1_1, nullptr);
     ASSERT_EQ(memcmp(val1_1->Buf(), str1_clone.data(), str1_clone.size()), 0);
     cache->Release(handle);
 
-    handle =
-        cache->Lookup("k2", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                      Cache::Priority::LOW, true, stats.get());
-    ASSERT_EQ(handle, nullptr);
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
+                           test_item_creator, Cache::Priority::LOW, true,
+                           stats.get());
+    ASSERT_NE(handle, nullptr);
+    cache->Release(handle);
 
     cache.reset();
     secondary_cache.reset();
   }
 
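BasicIntegrationTest is where the secondary cache is mounted under a primary LRU block cache, and the flipped final assertion (k2 now expected to hit) reflects the promote-and-erase flow in the updated comment. A hedged sketch of the wiring, using the secondary_cache field of LRUCacheOptions as in mainline RocksDB; capacities are illustrative.

#include <memory>

#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;

// Sketch: tiered cache wiring mirroring the test's setup.
std::shared_ptr<Cache> MakeTieredBlockCache() {
  CompressedSecondaryCacheOptions sec_opts;
  sec_opts.capacity = 2048;  // illustrative
  sec_opts.num_shard_bits = 0;
  LRUCacheOptions lru_opts(/*capacity=*/1024, /*num_shard_bits=*/0,
                           /*strict_capacity_limit=*/false,
                           /*high_pri_pool_ratio=*/0.5);
  lru_opts.secondary_cache = NewCompressedSecondaryCache(sec_opts);
  return NewLRUCache(lru_opts);
}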
   void BasicIntegrationFailTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
 
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
@@ -321,7 +316,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.num_shard_bits = 0;
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
     std::shared_ptr<SecondaryCache> secondary_cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+        NewCompressedSecondaryCache(secondary_cache_opts);
 
     LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                          kDontChargeCacheMetadata);
@@ -333,7 +328,8 @@ class LRUSecondaryCacheTest : public testing::Test {
     auto item1 =
         std::unique_ptr<TestItem>(new TestItem(str1.data(), str1.length()));
     ASSERT_NOK(cache->Insert("k1", item1.get(), nullptr, str1.length()));
-    ASSERT_OK(cache->Insert("k1", item1.get(), &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k1", item1.get(),
+                            &CompressedSecondaryCacheTest::helper_,
                             str1.length()));
     item1.release();  // Appease clang-analyze "potential memory leak"
 
@@ -341,7 +337,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     handle = cache->Lookup("k2", nullptr, test_item_creator,
                            Cache::Priority::LOW, true);
     ASSERT_EQ(handle, nullptr);
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
                            test_item_creator, Cache::Priority::LOW, false);
     ASSERT_EQ(handle, nullptr);
 
@@ -350,7 +346,7 @@ class LRUSecondaryCacheTest : public testing::Test {
   }
 
   void IntegrationSaveFailTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
 
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
@@ -366,7 +362,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
 
     std::shared_ptr<SecondaryCache> secondary_cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+        NewCompressedSecondaryCache(secondary_cache_opts);
 
     LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                          kDontChargeCacheMetadata);
@@ -376,25 +372,27 @@ class LRUSecondaryCacheTest : public testing::Test {
     Random rnd(301);
     std::string str1 = rnd.RandomString(1020);
     TestItem* item1 = new TestItem(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_fail_,
+    ASSERT_OK(cache->Insert("k1", item1,
+                            &CompressedSecondaryCacheTest::helper_fail_,
                             str1.length()));
     std::string str2 = rnd.RandomString(1020);
     TestItem* item2 = new TestItem(str2.data(), str2.length());
     // k1 should be demoted to the secondary cache.
-    ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_fail_,
+    ASSERT_OK(cache->Insert("k2", item2,
+                            &CompressedSecondaryCacheTest::helper_fail_,
                             str2.length()));
 
     Cache::Handle* handle;
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_fail_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_NE(handle, nullptr);
     cache->Release(handle);
     // This lookup should fail, since k1 demotion would have failed
-    handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_fail_,
+    handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_fail_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_EQ(handle, nullptr);
     // Since k1 didn't get promoted, k2 should still be in cache
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_fail_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_NE(handle, nullptr);
     cache->Release(handle);
@@ -404,7 +402,7 @@ class LRUSecondaryCacheTest : public testing::Test {
   }
 
   void IntegrationCreateFailTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
 
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
@@ -420,7 +418,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
 
     std::shared_ptr<SecondaryCache> secondary_cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+        NewCompressedSecondaryCache(secondary_cache_opts);
 
     LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                          kDontChargeCacheMetadata);
@@ -430,27 +428,27 @@ class LRUSecondaryCacheTest : public testing::Test {
     Random rnd(301);
     std::string str1 = rnd.RandomString(1020);
    TestItem* item1 = new TestItem(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
                             str1.length()));
 
     std::string str2 = rnd.RandomString(1020);
     TestItem* item2 = new TestItem(str2.data(), str2.length());
     // k1 should be demoted to the secondary cache.
-    ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
                             str2.length()));
 
     Cache::Handle* handle;
     SetFailCreate(true);
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_NE(handle, nullptr);
     cache->Release(handle);
     // This lookup should fail, since k1 creation would have failed
-    handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
+    handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_EQ(handle, nullptr);
     // Since k1 didn't get promoted, k2 should still be in cache
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+    handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
                            test_item_creator, Cache::Priority::LOW, true);
     ASSERT_NE(handle, nullptr);
     cache->Release(handle);
@@ -460,7 +458,7 @@ class LRUSecondaryCacheTest : public testing::Test {
   }
 
   void IntegrationFullCapacityTest(bool sec_cache_is_compressed) {
-    LRUSecondaryCacheOptions secondary_cache_opts;
+    CompressedSecondaryCacheOptions secondary_cache_opts;
 
     if (sec_cache_is_compressed) {
       if (!LZ4_Supported()) {
@@ -476,7 +474,7 @@ class LRUSecondaryCacheTest : public testing::Test {
     secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
 
     std::shared_ptr<SecondaryCache> secondary_cache =
-        NewLRUSecondaryCache(secondary_cache_opts);
+        NewCompressedSecondaryCache(secondary_cache_opts);
 
     LRUCacheOptions opts(1024, 0, /*_strict_capacity_limit=*/true, 0.5, nullptr,
                          kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
@@ -486,31 +484,32 @@ class LRUSecondaryCacheTest : public testing::Test {
     Random rnd(301);
     std::string str1 = rnd.RandomString(1020);
     TestItem* item1 = new TestItem(str1.data(), str1.length());
-    ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
                             str1.length()));
     std::string str2 = rnd.RandomString(1020);
     TestItem* item2 = new TestItem(str2.data(), str2.length());
     // k1 should be demoted to the secondary cache.
-    ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+    ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
                             str2.length()));
 
-    Cache::Handle* handle;
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
-                           test_item_creator, Cache::Priority::LOW, true);
-    ASSERT_NE(handle, nullptr);
-    // k1 promotion should fail due to the block cache being at capacity,
-    // but the lookup should still succeed
     Cache::Handle* handle2;
-    handle2 = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
+    handle2 = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
+                            test_item_creator, Cache::Priority::LOW, true);
+    ASSERT_NE(handle2, nullptr);
+    cache->Release(handle2);
+    // k1 promotion should fail due to the block cache being at capacity,
+    // but the lookup should still succeed
+    Cache::Handle* handle1;
+    handle1 = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
+                            test_item_creator, Cache::Priority::LOW, true);
+    ASSERT_NE(handle1, nullptr);
+    cache->Release(handle1);
+
+    // Since k1 didn't get inserted, k2 should still be in cache
+    handle2 = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
                             test_item_creator, Cache::Priority::LOW, true);
     ASSERT_NE(handle2, nullptr);
-    // Since k1 didn't get inserted, k2 should still be in cache
-    cache->Release(handle);
     cache->Release(handle2);
-    handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
-                           test_item_creator, Cache::Priority::LOW, true);
-    ASSERT_NE(handle, nullptr);
-    cache->Release(handle);
 
     cache.reset();
     secondary_cache.reset();
@@ -520,72 +519,83 @@ class LRUSecondaryCacheTest : public testing::Test {
   bool fail_create_;
 };
 
-Cache::CacheItemHelper LRUSecondaryCacheTest::helper_(
-    LRUSecondaryCacheTest::SizeCallback, LRUSecondaryCacheTest::SaveToCallback,
-    LRUSecondaryCacheTest::DeletionCallback);
+Cache::CacheItemHelper CompressedSecondaryCacheTest::helper_(
+    CompressedSecondaryCacheTest::SizeCallback,
+    CompressedSecondaryCacheTest::SaveToCallback,
+    CompressedSecondaryCacheTest::DeletionCallback);
 
-Cache::CacheItemHelper LRUSecondaryCacheTest::helper_fail_(
-    LRUSecondaryCacheTest::SizeCallback,
-    LRUSecondaryCacheTest::SaveToCallbackFail,
-    LRUSecondaryCacheTest::DeletionCallback);
+Cache::CacheItemHelper CompressedSecondaryCacheTest::helper_fail_(
+    CompressedSecondaryCacheTest::SizeCallback,
+    CompressedSecondaryCacheTest::SaveToCallbackFail,
+    CompressedSecondaryCacheTest::DeletionCallback);
 
-TEST_F(LRUSecondaryCacheTest, BasicTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest, BasicTestWithNoCompression) {
   BasicTest(false, false);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicTestWithMemoryAllocatorAndNoCompression) {
+TEST_F(CompressedSecondaryCacheTest,
+       BasicTestWithMemoryAllocatorAndNoCompression) {
   BasicTest(false, true);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest, BasicTestWithCompression) {
   BasicTest(true, false);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicTestWithMemoryAllocatorAndCompression) {
+TEST_F(CompressedSecondaryCacheTest,
+       BasicTestWithMemoryAllocatorAndCompression) {
   BasicTest(true, true);
 }
 
-TEST_F(LRUSecondaryCacheTest, FailsTestWithNoCompression) { FailsTest(false); }
+TEST_F(CompressedSecondaryCacheTest, FailsTestWithNoCompression) {
+  FailsTest(false);
+}
 
-TEST_F(LRUSecondaryCacheTest, FailsTestWithCompression) { FailsTest(true); }
+TEST_F(CompressedSecondaryCacheTest, FailsTestWithCompression) {
+  FailsTest(true);
+}
 
-TEST_F(LRUSecondaryCacheTest, BasicIntegrationTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest, BasicIntegrationTestWithNoCompression) {
   BasicIntegrationTest(false);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicIntegrationTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest, BasicIntegrationTestWithCompression) {
   BasicIntegrationTest(true);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicIntegrationFailTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest,
+       BasicIntegrationFailTestWithNoCompression) {
  BasicIntegrationFailTest(false);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicIntegrationFailTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest, BasicIntegrationFailTestWithCompression) {
   BasicIntegrationFailTest(true);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationSaveFailTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest, IntegrationSaveFailTestWithNoCompression) {
   IntegrationSaveFailTest(false);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationSaveFailTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest, IntegrationSaveFailTestWithCompression) {
   IntegrationSaveFailTest(true);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationCreateFailTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest,
       IntegrationCreateFailTestWithNoCompression) {
   IntegrationCreateFailTest(false);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationCreateFailTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest, IntegrationCreateFailTestWithCompression) {
   IntegrationCreateFailTest(true);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationFullCapacityTestWithNoCompression) {
+TEST_F(CompressedSecondaryCacheTest,
       IntegrationFullCapacityTestWithNoCompression) {
   IntegrationFullCapacityTest(false);
 }
 
-TEST_F(LRUSecondaryCacheTest, IntegrationFullCapacityTestWithCompression) {
+TEST_F(CompressedSecondaryCacheTest,
       IntegrationFullCapacityTestWithCompression) {
   IntegrationFullCapacityTest(true);
 }
 
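The two CacheItemHelper instances above bundle the size/serialize/delete callbacks the block cache invokes when demoting an entry to the secondary tier. The test's actual callback bodies are elided from this excerpt, so the shapes below are assumptions, sketched against the CacheItemHelper signatures of this era of the API.

// Assumed, for illustration only (the real definitions are elided above):
static size_t SizeCallback(void* obj) {
  return static_cast<TestItem*>(obj)->Size();
}
static Status SaveToCallback(void* from_obj, size_t from_offset, size_t length,
                             void* out) {
  TestItem* item = static_cast<TestItem*>(from_obj);
  memcpy(out, item->Buf() + from_offset, length);  // serialize a byte range
  return Status::OK();
}
static void DeletionCallback(const Slice& /*key*/, void* obj) {
  delete static_cast<TestItem*>(obj);
}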
cache/fast_lru_cache.cc (new file, 511 lines)
@@ -0,0 +1,511 @@
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/fast_lru_cache.h"

#include <cassert>
#include <cstdint>
#include <cstdio>

#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "port/lang.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

namespace fast_lru_cache {

LRUHandleTable::LRUHandleTable(int max_upper_hash_bits)
    : length_bits_(/* historical starting size*/ 4),
      list_(new LRUHandle* [size_t{1} << length_bits_] {}),
      elems_(0),
      max_length_bits_(max_upper_hash_bits) {}

LRUHandleTable::~LRUHandleTable() {
  ApplyToEntriesRange(
      [](LRUHandle* h) {
        if (!h->HasRefs()) {
          h->Free();
        }
      },
      0, uint32_t{1} << length_bits_);
}

LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
  return *FindPointer(key, hash);
}

LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
  LRUHandle** ptr = FindPointer(h->key(), h->hash);
  LRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;
  if (old == nullptr) {
    ++elems_;
    if ((elems_ >> length_bits_) > 0) {  // elems_ >= length
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}

LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = FindPointer(key, hash);
  LRUHandle* result = *ptr;
  if (result != nullptr) {
    *ptr = result->next_hash;
    --elems_;
  }
  return result;
}

LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}

void LRUHandleTable::Resize() {
  if (length_bits_ >= max_length_bits_) {
    // Due to reaching limit of hash information, if we made the table bigger,
    // we would allocate more addresses but only the same number would be used.
    return;
  }
  if (length_bits_ >= 31) {
    // Avoid undefined behavior shifting uint32_t by 32.
    return;
  }

  uint32_t old_length = uint32_t{1} << length_bits_;
  int new_length_bits = length_bits_ + 1;
  std::unique_ptr<LRUHandle* []> new_list {
    new LRUHandle* [size_t{1} << new_length_bits] {}
  };
  uint32_t count = 0;
  for (uint32_t i = 0; i < old_length; i++) {
    LRUHandle* h = list_[i];
    while (h != nullptr) {
      LRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  assert(elems_ == count);
  list_ = std::move(new_list);
  length_bits_ = new_length_bits;
}

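A detail worth pausing on: FindPointer() and Resize() both index buckets with the upper bits of the hash, hash >> (32 - length_bits_). That choice is what makes the hash-prefix cursor in ApplyToSomeEntries() (below) stable across resizes, since growing the table by one bit only splits each bucket in two. A standalone sketch of the index arithmetic:

#include <cassert>
#include <cstdint>

// Sketch of the bucket-index math used above: the upper hash bits select
// the bucket, so a table of twice the size refines, rather than reshuffles,
// the bucket assignment.
int main() {
  uint32_t hash = 0xDEADBEEF;
  int length_bits = 4;
  uint32_t idx_small = hash >> (32 - length_bits);      // 4-bit prefix
  uint32_t idx_big = hash >> (32 - (length_bits + 1));  // 5-bit prefix
  assert(idx_big >> 1 == idx_small);  // a resize only splits each bucket
  return 0;
}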
LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                             CacheMetadataChargePolicy metadata_charge_policy,
                             int max_upper_hash_bits)
    : capacity_(0),
      strict_capacity_limit_(strict_capacity_limit),
      table_(max_upper_hash_bits),
      usage_(0),
      lru_usage_(0) {
  set_metadata_charge_policy(metadata_charge_policy);
  // Make empty circular linked list.
  lru_.next = &lru_;
  lru_.prev = &lru_;
  lru_low_pri_ = &lru_;
  SetCapacity(capacity);
}

void LRUCacheShard::EraseUnRefEntries() {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    while (lru_.next != &lru_) {
      LRUHandle* old = lru_.next;
      // LRU list contains only elements which can be evicted.
      assert(old->InCache() && !old->HasRefs());
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
      assert(usage_ >= total_charge);
      usage_ -= total_charge;
      last_reference_list.push_back(old);
    }
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

void LRUCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  // The state is essentially going to be the starting hash, which works
  // nicely even if we resize between calls because we use upper-most
  // hash bits for table indexes.
  MutexLock l(&mutex_);
  uint32_t length_bits = table_.GetLengthBits();
  uint32_t length = uint32_t{1} << length_bits;

  assert(average_entries_per_lock > 0);
  // Assuming we are called with same average_entries_per_lock repeatedly,
  // this simplifies some logic (index_end will not overflow).
  assert(average_entries_per_lock < length || *state == 0);

  uint32_t index_begin = *state >> (32 - length_bits);
  uint32_t index_end = index_begin + average_entries_per_lock;
  if (index_end >= length) {
    // Going to end
    index_end = length;
    *state = UINT32_MAX;
  } else {
    *state = index_end << (32 - length_bits);
  }

  table_.ApplyToEntriesRange(
      [callback](LRUHandle* h) {
        callback(h->key(), h->value, h->charge, h->deleter);
      },
      index_begin, index_end);
}

void LRUCacheShard::LRU_Remove(LRUHandle* e) {
  assert(e->next != nullptr);
  assert(e->prev != nullptr);
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = nullptr;
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  assert(lru_usage_ >= total_charge);
  lru_usage_ -= total_charge;
}

void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  // Insert "e" to head of LRU list.
  e->next = &lru_;
  e->prev = lru_.prev;
  e->prev->next = e;
  e->next->prev = e;
  lru_usage_ += total_charge;
}

void LRUCacheShard::EvictFromLRU(size_t charge,
                                 autovector<LRUHandle*>* deleted) {
  while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    // LRU list contains only elements which can be evicted.
    assert(old->InCache() && !old->HasRefs());
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
    assert(usage_ >= old_total_charge);
    usage_ -= old_total_charge;
    deleted->push_back(old);
  }
}

void LRUCacheShard::SetCapacity(size_t capacity) {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    capacity_ = capacity;
    EvictFromLRU(0, &last_reference_list);
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  MutexLock l(&mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}

Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
                                 bool free_handle_on_fail) {
  Status s = Status::OK();
  autovector<LRUHandle*> last_reference_list;
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);

  {
    MutexLock l(&mutex_);

    // Free the space following strict LRU policy until enough space
    // is freed or the lru list is empty.
    EvictFromLRU(total_charge, &last_reference_list);

    if ((usage_ + total_charge) > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      e->SetInCache(false);
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry inserted
        // into cache and get evicted immediately.
        last_reference_list.push_back(e);
      } else {
        if (free_handle_on_fail) {
          delete[] reinterpret_cast<char*>(e);
          *handle = nullptr;
        }
        s = Status::Incomplete("Insert failed due to LRU cache being full.");
      }
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      LRUHandle* old = table_.Insert(e);
      usage_ += total_charge;
      if (old != nullptr) {
        s = Status::OkOverwritten();
        assert(old->InCache());
        old->SetInCache(false);
        if (!old->HasRefs()) {
          // old is on LRU because it's in cache and its reference count is 0.
          LRU_Remove(old);
          size_t old_total_charge =
              old->CalcTotalCharge(metadata_charge_policy_);
          assert(usage_ >= old_total_charge);
          usage_ -= old_total_charge;
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        // If caller already holds a ref, no need to take one here.
        if (!e->HasRefs()) {
          e->Ref();
        }
        *handle = reinterpret_cast<Cache::Handle*>(e);
      }
    }
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto entry : last_reference_list) {
    entry->Free();
  }

  return s;
}

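InsertItem() encodes two distinct over-capacity behaviors: with no out-handle the entry is accounted as inserted-then-immediately-evicted and the call still returns OK, while with an out-handle under strict_capacity_limit it returns Status::Incomplete and nulls the handle. A hedged sketch of how that surfaces through the public Cache API; the cache instance and the charge value are assumptions for illustration.

#include <cassert>
#include <memory>

#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;

// Illustrative only: deleter is a no-op because the value is static.
static void NoopDeleter(const Slice& /*key*/, void* /*value*/) {}

void InsertSemantics(const std::shared_ptr<Cache>& cache) {
  static char value[] = "v";
  // No out-handle: even if the charge exceeds capacity, the entry is
  // treated as inserted and immediately evicted, and the call returns OK.
  Status s = cache->Insert("k", value, /*charge=*/1 << 20, NoopDeleter);
  assert(s.ok());
  // With an out-handle and strict_capacity_limit, the same insert would
  // instead return Status::Incomplete and leave the handle null.
}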
|
Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
|
||||||
|
LRUHandle* e = nullptr;
|
||||||
|
{
|
||||||
|
MutexLock l(&mutex_);
|
||||||
|
e = table_.Lookup(key, hash);
|
||||||
|
if (e != nullptr) {
|
||||||
|
assert(e->InCache());
|
||||||
|
if (!e->HasRefs()) {
|
||||||
|
// The entry is in LRU since it's in hash and has no external references
|
||||||
|
LRU_Remove(e);
|
||||||
|
}
|
||||||
|
e->Ref();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return reinterpret_cast<Cache::Handle*>(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LRUCacheShard::Ref(Cache::Handle* h) {
|
||||||
|
LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
|
||||||
|
MutexLock l(&mutex_);
|
||||||
|
// To create another reference - entry must be already externally referenced.
|
||||||
|
assert(e->HasRefs());
|
||||||
|
e->Ref();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
|
||||||
|
if (handle == nullptr) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
|
||||||
|
bool last_reference = false;
|
||||||
|
{
|
||||||
|
MutexLock l(&mutex_);
|
||||||
|
last_reference = e->Unref();
|
||||||
|
if (last_reference && e->InCache()) {
|
||||||
|
// The item is still in cache, and nobody else holds a reference to it.
|
||||||
|
if (usage_ > capacity_ || erase_if_last_ref) {
|
||||||
|
// The LRU list must be empty since the cache is full.
|
||||||
|
assert(lru_.next == &lru_ || erase_if_last_ref);
|
||||||
|
// Take this opportunity and remove the item.
|
||||||
|
table_.Remove(e->key(), e->hash);
|
||||||
|
e->SetInCache(false);
|
||||||
|
} else {
|
||||||
|
// Put the item back on the LRU list, and don't free it.
|
||||||
|
LRU_Insert(e);
|
||||||
|
last_reference = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If it was the last reference, then decrement the cache usage.
|
||||||
|
if (last_reference) {
|
||||||
|
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||||
|
assert(usage_ >= total_charge);
|
||||||
|
usage_ -= total_charge;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free the entry here outside of mutex for performance reasons.
|
||||||
|
if (last_reference) {
|
||||||
|
e->Free();
|
||||||
|
}
|
||||||
|
return last_reference;
|
||||||
|
}
|
||||||
|
|
||||||
|
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
|
||||||
|
size_t charge, Cache::DeleterFn deleter,
|
||||||
|
Cache::Handle** handle,
|
||||||
|
Cache::Priority /*priority*/) {
|
||||||
|
// Allocate the memory here outside of the mutex.
|
||||||
|
// If the cache is full, we'll have to release it.
|
||||||
|
// It shouldn't happen very often though.
|
||||||
|
LRUHandle* e = reinterpret_cast<LRUHandle*>(
|
||||||
|
new char[sizeof(LRUHandle) - 1 + key.size()]);
|
||||||
|
  e->value = value;
  e->flags = 0;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->hash = hash;
  e->refs = 0;
  e->next = e->prev = nullptr;
  e->SetInCache(true);
  memcpy(e->key_data, key.data(), key.size());

  return InsertItem(e, handle, /* free_handle_on_fail */ true);
}

void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
  LRUHandle* e;
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      assert(e->InCache());
      e->SetInCache(false);
      if (!e->HasRefs()) {
        // The entry is in LRU since it's in hash and has no external
        // references.
        LRU_Remove(e);
        size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
        assert(usage_ >= total_charge);
        usage_ -= total_charge;
        last_reference = true;
      }
    }
  }

  // Free the entry here outside of mutex for performance reasons.
  // last_reference will only be true if e != nullptr.
  if (last_reference) {
    e->Free();
  }
}

size_t LRUCacheShard::GetUsage() const {
  MutexLock l(&mutex_);
  return usage_;
}

size_t LRUCacheShard::GetPinnedUsage() const {
  MutexLock l(&mutex_);
  assert(usage_ >= lru_usage_);
  return usage_ - lru_usage_;
}

std::string LRUCacheShard::GetPrintableOptions() const { return std::string{}; }

LRUCache::LRUCache(size_t capacity, int num_shard_bits,
                   bool strict_capacity_limit,
                   CacheMetadataChargePolicy metadata_charge_policy)
    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
  num_shards_ = 1 << num_shard_bits;
  shards_ = reinterpret_cast<LRUCacheShard*>(
      port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    new (&shards_[i])
        LRUCacheShard(per_shard, strict_capacity_limit, metadata_charge_policy,
                      /* max_upper_hash_bits */ 32 - num_shard_bits);
  }
}

LRUCache::~LRUCache() {
  if (shards_ != nullptr) {
    assert(num_shards_ > 0);
    for (int i = 0; i < num_shards_; i++) {
      shards_[i].~LRUCacheShard();
    }
    port::cacheline_aligned_free(shards_);
  }
}

CacheShard* LRUCache::GetShard(uint32_t shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

const CacheShard* LRUCache::GetShard(uint32_t shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

void* LRUCache::Value(Handle* handle) {
  return reinterpret_cast<const LRUHandle*>(handle)->value;
}

size_t LRUCache::GetCharge(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->charge;
}

Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
  auto h = reinterpret_cast<const LRUHandle*>(handle);
  return h->deleter;
}

uint32_t LRUCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->hash;
}

void LRUCache::DisownData() {
  // Leak data only if that won't generate an ASAN/valgrind warning.
  if (!kMustFreeHeapAllocations) {
    shards_ = nullptr;
    num_shards_ = 0;
  }
}

} // namespace fast_lru_cache

std::shared_ptr<Cache> NewFastLRUCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits >= 20) {
    return nullptr;  // The cache cannot be sharded into too many fine pieces.
  }
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<fast_lru_cache::LRUCache>(
      capacity, num_shard_bits, strict_capacity_limit, metadata_charge_policy);
}

} // namespace ROCKSDB_NAMESPACE
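For orientation, here is a minimal usage sketch of the new experimental cache through the generic Cache interface. This is reviewer-added illustration, not part of the diff; it assumes the declarations above are reachable via cache/fast_lru_cache.h and rocksdb/cache.h, and the key, value, and sizes are arbitrary.

// Reviewer sketch (not part of the diff). Assumes the headers named above.
#include <cassert>
#include <memory>

#include "cache/fast_lru_cache.h"
#include "rocksdb/cache.h"

using ROCKSDB_NAMESPACE::Cache;
using ROCKSDB_NAMESPACE::NewFastLRUCache;
using ROCKSDB_NAMESPACE::Slice;
using ROCKSDB_NAMESPACE::Status;

// Deleter invoked by the cache when an entry is erased or evicted.
static void IntDeleter(const Slice& /*key*/, void* value) {
  delete static_cast<int*>(value);
}

void FastLRUCacheSketch() {
  std::shared_ptr<Cache> cache = NewFastLRUCache(
      /* capacity */ 1 << 20, /* num_shard_bits */ 4,
      /* strict_capacity_limit */ false,
      ROCKSDB_NAMESPACE::kDontChargeCacheMetadata);
  Cache::Handle* handle = nullptr;
  Status s =
      cache->Insert("k", new int(42), sizeof(int), &IntDeleter, &handle);
  if (s.ok() && handle != nullptr) {
    assert(*static_cast<int*>(cache->Value(handle)) == 42);
    cache->Release(handle);  // drop our reference; the entry stays cached
  }
}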
299 cache/fast_lru_cache.h vendored Normal file
@ -0,0 +1,299 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include <memory>
#include <string>

#include "cache/sharded_cache.h"
#include "port/lang.h"
#include "port/malloc.h"
#include "port/port.h"
#include "rocksdb/secondary_cache.h"
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {
namespace fast_lru_cache {

// An experimental (under development!) alternative to LRUCache.

struct LRUHandle {
  void* value;
  Cache::DeleterFn deleter;
  LRUHandle* next_hash;
  LRUHandle* next;
  LRUHandle* prev;
  size_t charge;  // TODO(opt): Only allow uint32_t?
  size_t key_length;
  // The hash of key(). Used for fast sharding and comparisons.
  uint32_t hash;
  // The number of external refs to this entry. The cache itself is not
  // counted.
  uint32_t refs;

  enum Flags : uint8_t {
    // Whether this entry is referenced by the hash table.
    IN_CACHE = (1 << 0),
  };
  uint8_t flags;

  // Beginning of the key (MUST BE THE LAST FIELD IN THIS STRUCT!)
  char key_data[1];

  Slice key() const { return Slice(key_data, key_length); }

  // Increase the reference count by 1.
  void Ref() { refs++; }

  // Just reduce the reference count by 1. Return true if it was the last
  // reference.
  bool Unref() {
    assert(refs > 0);
    refs--;
    return refs == 0;
  }

  // Return true if there are external refs, false otherwise.
  bool HasRefs() const { return refs > 0; }

  bool InCache() const { return flags & IN_CACHE; }

  void SetInCache(bool in_cache) {
    if (in_cache) {
      flags |= IN_CACHE;
    } else {
      flags &= ~IN_CACHE;
    }
  }

  void Free() {
    assert(refs == 0);
    if (deleter) {
      (*deleter)(key(), value);
    }
    delete[] reinterpret_cast<char*>(this);
  }

  // Calculate the memory usage by metadata.
  inline size_t CalcTotalCharge(
      CacheMetadataChargePolicy metadata_charge_policy) {
    size_t meta_charge = 0;
    if (metadata_charge_policy == kFullChargeCacheMetadata) {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
      meta_charge += malloc_usable_size(static_cast<void*>(this));
#else
      // This is the size that is used when a new handle is created.
      meta_charge += sizeof(LRUHandle) - 1 + key_length;
#endif
    }
    return charge + meta_charge;
  }
};

// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class LRUHandleTable {
 public:
  // If the table uses more hash bits than `max_upper_hash_bits`,
  // it will eat into the bits used for sharding, which are constant
  // for a given LRUHandleTable.
  explicit LRUHandleTable(int max_upper_hash_bits);
  ~LRUHandleTable();

  LRUHandle* Lookup(const Slice& key, uint32_t hash);
  LRUHandle* Insert(LRUHandle* h);
  LRUHandle* Remove(const Slice& key, uint32_t hash);

  template <typename T>
  void ApplyToEntriesRange(T func, uint32_t index_begin, uint32_t index_end) {
    for (uint32_t i = index_begin; i < index_end; i++) {
      LRUHandle* h = list_[i];
      while (h != nullptr) {
        auto n = h->next_hash;
        assert(h->InCache());
        func(h);
        h = n;
      }
    }
  }

  int GetLengthBits() const { return length_bits_; }

 private:
  // Return a pointer to slot that points to a cache entry that
  // matches key/hash. If there is no such cache entry, return a
  // pointer to the trailing slot in the corresponding linked list.
  LRUHandle** FindPointer(const Slice& key, uint32_t hash);

  void Resize();

  // Number of hash bits (upper because lower bits used for sharding)
  // used for table index. Length == 1 << length_bits_
  int length_bits_;

  // The table consists of an array of buckets where each bucket is
  // a linked list of cache entries that hash into the bucket.
  std::unique_ptr<LRUHandle*[]> list_;

  // Number of elements currently in the table.
  uint32_t elems_;

  // Set from max_upper_hash_bits (see constructor).
  const int max_length_bits_;
};

// A single shard of sharded cache.
class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
 public:
  LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                CacheMetadataChargePolicy metadata_charge_policy,
                int max_upper_hash_bits);
  ~LRUCacheShard() override = default;

  // Separate from constructor so caller can easily make an array of LRUCache.
  // If current usage is more than new capacity, the function will attempt to
  // free the needed space.
  void SetCapacity(size_t capacity) override;

  // Set the flag to reject insertion if the cache is full.
  void SetStrictCapacityLimit(bool strict_capacity_limit) override;

  // Like Cache methods, but with an extra "hash" parameter.
  Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
                Cache::DeleterFn deleter, Cache::Handle** handle,
                Cache::Priority priority) override;

  Status Insert(const Slice& key, uint32_t hash, void* value,
                const Cache::CacheItemHelper* helper, size_t charge,
                Cache::Handle** handle, Cache::Priority priority) override {
    return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
  }

  Cache::Handle* Lookup(const Slice& key, uint32_t hash,
                        const Cache::CacheItemHelper* /*helper*/,
                        const Cache::CreateCallback& /*create_cb*/,
                        Cache::Priority /*priority*/, bool /*wait*/,
                        Statistics* /*stats*/) override {
    return Lookup(key, hash);
  }
  Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;

  bool Release(Cache::Handle* handle, bool /*useful*/,
               bool erase_if_last_ref) override {
    return Release(handle, erase_if_last_ref);
  }
  bool IsReady(Cache::Handle* /*handle*/) override { return true; }
  void Wait(Cache::Handle* /*handle*/) override {}

  bool Ref(Cache::Handle* handle) override;
  bool Release(Cache::Handle* handle, bool erase_if_last_ref = false) override;
  void Erase(const Slice& key, uint32_t hash) override;

  size_t GetUsage() const override;
  size_t GetPinnedUsage() const override;

  void ApplyToSomeEntries(
      const std::function<void(const Slice& key, void* value, size_t charge,
                               DeleterFn deleter)>& callback,
      uint32_t average_entries_per_lock, uint32_t* state) override;

  void EraseUnRefEntries() override;

  std::string GetPrintableOptions() const override;

 private:
  friend class LRUCache;
  // Insert an item into the hash table and, if handle is null, insert into
  // the LRU list. Older items are evicted as necessary. If the cache is full
  // and free_handle_on_fail is true, the item is deleted and handle is set to
  // nullptr.
  Status InsertItem(LRUHandle* item, Cache::Handle** handle,
                    bool free_handle_on_fail);

  void LRU_Remove(LRUHandle* e);
  void LRU_Insert(LRUHandle* e);

  // Free some space following strict LRU policy until enough space
  // to hold (usage_ + charge) is freed or the LRU list is empty.
  // This function is not thread safe - it needs to be executed while
  // holding the mutex_.
  void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);

  // Initialized before use.
  size_t capacity_;

  // Whether to reject insertion if cache reaches its full capacity.
  bool strict_capacity_limit_;

  // Dummy head of LRU list.
  // lru.prev is the newest entry, lru.next is the oldest entry.
  // LRU contains items which can be evicted, i.e. referenced only by the
  // cache.
  LRUHandle lru_;

  // Pointer to head of low-pri pool in LRU list.
  LRUHandle* lru_low_pri_;

  // ------------^^^^^^^^^^^^^-----------
  // Not frequently modified data members
  // ------------------------------------
  //
  // We separate data members that are updated frequently from the ones that
  // are not frequently updated so that they don't share the same cache line,
  // which would lead to false sharing.
  //
  // ------------------------------------
  // Frequently modified data members
  // ------------vvvvvvvvvvvvv-----------
  LRUHandleTable table_;

  // Memory size for entries residing in the cache.
  size_t usage_;

  // Memory size for entries residing only in the LRU list.
  size_t lru_usage_;

  // mutex_ protects the following state.
  // We don't count mutex_ as the cache's internal state, so semantically we
  // don't mind mutex_ invoking the non-const actions.
  mutable port::Mutex mutex_;
};

class LRUCache
#ifdef NDEBUG
    final
#endif
    : public ShardedCache {
 public:
  LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
           CacheMetadataChargePolicy metadata_charge_policy =
               kDontChargeCacheMetadata);
  ~LRUCache() override;
  const char* Name() const override { return "LRUCache"; }
  CacheShard* GetShard(uint32_t shard) override;
  const CacheShard* GetShard(uint32_t shard) const override;
  void* Value(Handle* handle) override;
  size_t GetCharge(Handle* handle) const override;
  uint32_t GetHash(Handle* handle) const override;
  DeleterFn GetDeleter(Handle* handle) const override;
  void DisownData() override;

 private:
  LRUCacheShard* shards_ = nullptr;
  int num_shards_ = 0;
};
} // namespace fast_lru_cache

std::shared_ptr<Cache> NewFastLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);

} // namespace ROCKSDB_NAMESPACE
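Worth noting how the constructor arrives at max_upper_hash_bits = 32 - num_shard_bits: per the comments in this header, the low bits of the 32-bit hash select the shard, so only the remaining upper bits are available to the per-shard table. A hedged illustration follows; the helper name is mine, not the diff's exact routing code.

// Illustrative only; not the diff's exact routing helper.
#include <cstdint>

inline uint32_t PickShard(uint32_t hash, int num_shard_bits) {
  // The low num_shard_bits bits choose the shard...
  return hash & ((uint32_t{1} << num_shard_bits) - 1u);
}
// ...which leaves 32 - num_shard_bits upper bits for LRUHandleTable,
// matching the max_upper_hash_bits argument passed by LRUCache::LRUCache.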
13 cache/lru_cache.cc vendored
@ -19,6 +19,7 @@
 #include "util/mutexlock.h"
 
 namespace ROCKSDB_NAMESPACE {
+namespace lru_cache {
 
 LRUHandleTable::LRUHandleTable(int max_upper_hash_bits)
     : length_bits_(/* historical starting size*/ 4),
@ -298,7 +299,7 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
   // Free the entries outside of mutex for performance reasons.
   for (auto entry : last_reference_list) {
     if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
-        !entry->IsPromoted()) {
+        !entry->IsInSecondaryCache()) {
       secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
           .PermitUncheckedError();
     }
@ -373,7 +374,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
   // Free the entries here outside of mutex for performance reasons.
   for (auto entry : last_reference_list) {
     if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
-        !entry->IsPromoted()) {
+        !entry->IsInSecondaryCache()) {
       secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
           .PermitUncheckedError();
     }
@ -389,7 +390,6 @@ void LRUCacheShard::Promote(LRUHandle* e) {
   assert(secondary_handle->IsReady());
   e->SetIncomplete(false);
   e->SetInCache(true);
-  e->SetPromoted(true);
   e->value = secondary_handle->Value();
   e->charge = secondary_handle->Size();
   delete secondary_handle;
@ -446,8 +446,9 @@ Cache::Handle* LRUCacheShard::Lookup(
     // accounting purposes, which we won't demote to the secondary cache
     // anyway.
     assert(create_cb && helper->del_cb);
+    bool is_in_sec_cache{false};
     std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
-        secondary_cache_->Lookup(key, create_cb, wait);
+        secondary_cache_->Lookup(key, create_cb, wait, is_in_sec_cache);
     if (secondary_handle != nullptr) {
       e = reinterpret_cast<LRUHandle*>(
           new char[sizeof(LRUHandle) - 1 + key.size()]);
@ -467,6 +468,7 @@ Cache::Handle* LRUCacheShard::Lookup(
 
       if (wait) {
         Promote(e);
+        e->SetIsInSecondaryCache(is_in_sec_cache);
         if (!e->value) {
           // The secondary cache returned a handle, but the lookup failed.
           e->Unref();
@ -480,6 +482,7 @@ Cache::Handle* LRUCacheShard::Lookup(
         // If wait is false, we always return a handle and let the caller
         // release the handle after checking for success or failure.
         e->SetIncomplete(true);
+        e->SetIsInSecondaryCache(is_in_sec_cache);
         // This may be slightly inaccurate, if the lookup eventually fails.
         // But the probability is very low.
         PERF_COUNTER_ADD(secondary_cache_hit_count, 1);
@ -757,6 +760,8 @@ void LRUCache::WaitAll(std::vector<Handle*>& handles) {
   }
 }
 
+} // namespace lru_cache
+
 std::shared_ptr<Cache> NewLRUCache(
     size_t capacity, int num_shard_bits, bool strict_capacity_limit,
     double high_pri_pool_ratio,
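The recurring change in this file is the new residency out-parameter on SecondaryCache::Lookup. Below is a hedged sketch of the contract, condensed from the hunks above; the wrapper function is illustrative, not code from the diff.

// Illustrative wrapper; the real call sites are in the hunks above.
void LookupAndRecordResidency(SecondaryCache* secondary_cache,
                              const Slice& key,
                              const Cache::CreateCallback& create_cb,
                              LRUHandle* e) {
  bool is_in_sec_cache{false};
  std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
      secondary_cache->Lookup(key, create_cb, /* wait */ true,
                              is_in_sec_cache);
  if (secondary_handle != nullptr) {
    // Remember residency so a later eviction can skip re-inserting an
    // entry that is still present in the secondary tier.
    e->SetIsInSecondaryCache(is_in_sec_cache);
  }
}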
26 cache/lru_cache.h vendored
@ -19,6 +19,7 @@
 #include "util/autovector.h"
 
 namespace ROCKSDB_NAMESPACE {
+namespace lru_cache {
 
 // LRU cache implementation. This class is not thread-safe.
 
@ -85,8 +86,8 @@ struct LRUHandle {
     IS_SECONDARY_CACHE_COMPATIBLE = (1 << 4),
     // Is the handle still being read from a lower tier.
     IS_PENDING = (1 << 5),
-    // Has the item been promoted from a lower tier.
-    IS_PROMOTED = (1 << 6),
+    // Whether this handle is still in a lower tier
+    IS_IN_SECONDARY_CACHE = (1 << 6),
   };
 
   uint8_t flags;
@ -129,7 +130,7 @@ struct LRUHandle {
 #endif  // __SANITIZE_THREAD__
   }
   bool IsPending() const { return flags & IS_PENDING; }
-  bool IsPromoted() const { return flags & IS_PROMOTED; }
+  bool IsInSecondaryCache() const { return flags & IS_IN_SECONDARY_CACHE; }
 
   void SetInCache(bool in_cache) {
     if (in_cache) {
@ -176,11 +177,11 @@ struct LRUHandle {
     }
   }
 
-  void SetPromoted(bool promoted) {
-    if (promoted) {
-      flags |= IS_PROMOTED;
+  void SetIsInSecondaryCache(bool is_in_secondary_cache) {
+    if (is_in_secondary_cache) {
+      flags |= IS_IN_SECONDARY_CACHE;
     } else {
-      flags &= ~IS_PROMOTED;
+      flags &= ~IS_IN_SECONDARY_CACHE;
     }
   }
 
@ -371,8 +372,9 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
                 DeleterFn deleter, const Cache::CacheItemHelper* helper,
                 Cache::Handle** handle, Cache::Priority priority);
-  // Promote an item looked up from the secondary cache to the LRU cache. The
-  // item is only inserted into the hash table and not the LRU list, and only
+  // Promote an item looked up from the secondary cache to the LRU cache.
+  // The item may be still in the secondary cache.
+  // It is only inserted into the hash table and not the LRU list, and only
   // if the cache is not at full capacity, as is the case during Insert. The
   // caller should hold a reference on the LRUHandle. When the caller releases
   // the last reference, the item is added to the LRU list.
@ -478,4 +480,10 @@ class LRUCache
   std::shared_ptr<SecondaryCache> secondary_cache_;
 };
 
+} // namespace lru_cache
+
+using LRUCache = lru_cache::LRUCache;
+using LRUHandle = lru_cache::LRUHandle;
+using LRUCacheShard = lru_cache::LRUCacheShard;
+
 } // namespace ROCKSDB_NAMESPACE
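The namespace move stays source-compatible because of the using-declarations added at the bottom of the header: every existing reference to ROCKSDB_NAMESPACE::LRUCache keeps compiling unchanged. A minimal standalone illustration of the pattern (not RocksDB code):

// Self-contained illustration of the re-export pattern.
namespace outer {
namespace detail {
class Widget {};
}  // namespace detail
using Widget = detail::Widget;  // old spelling keeps working
}  // namespace outer

outer::Widget w;  // unaffected by the move into the nested namespace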
100 cache/lru_cache_test.cc vendored
@ -266,12 +266,13 @@ class TestSecondaryCache : public SecondaryCache {
   }
 
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
-      const Slice& key, const Cache::CreateCallback& create_cb,
-      bool /*wait*/) override {
+      const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
+      bool& is_in_sec_cache) override {
     std::string key_str = key.ToString();
     TEST_SYNC_POINT_CALLBACK("TestSecondaryCache::Lookup", &key_str);
 
     std::unique_ptr<SecondaryCacheResultHandle> secondary_handle;
+    is_in_sec_cache = false;
     ResultType type = ResultType::SUCCESS;
     auto iter = result_map_.find(key.ToString());
     if (iter != result_map_.end()) {
@ -296,6 +297,7 @@ class TestSecondaryCache : public SecondaryCache {
     if (s.ok()) {
       secondary_handle.reset(new TestSecondaryCacheResultHandle(
           cache_.get(), handle, value, charge, type));
+      is_in_sec_cache = true;
     } else {
       cache_->Release(handle);
     }
@ -383,10 +385,10 @@ class DBSecondaryCacheTest : public DBTestBase {
   std::unique_ptr<Env> fault_env_;
 };
 
-class LRUSecondaryCacheTest : public LRUCacheTest {
+class LRUCacheSecondaryCacheTest : public LRUCacheTest {
  public:
-  LRUSecondaryCacheTest() : fail_create_(false) {}
-  ~LRUSecondaryCacheTest() {}
+  LRUCacheSecondaryCacheTest() : fail_create_(false) {}
+  ~LRUCacheSecondaryCacheTest() {}
 
  protected:
   class TestItem {
@ -449,16 +451,17 @@ class LRUSecondaryCacheTest : public LRUCacheTest {
   bool fail_create_;
 };
 
-Cache::CacheItemHelper LRUSecondaryCacheTest::helper_(
-    LRUSecondaryCacheTest::SizeCallback, LRUSecondaryCacheTest::SaveToCallback,
-    LRUSecondaryCacheTest::DeletionCallback);
+Cache::CacheItemHelper LRUCacheSecondaryCacheTest::helper_(
+    LRUCacheSecondaryCacheTest::SizeCallback,
+    LRUCacheSecondaryCacheTest::SaveToCallback,
+    LRUCacheSecondaryCacheTest::DeletionCallback);
 
-Cache::CacheItemHelper LRUSecondaryCacheTest::helper_fail_(
-    LRUSecondaryCacheTest::SizeCallback,
-    LRUSecondaryCacheTest::SaveToCallbackFail,
-    LRUSecondaryCacheTest::DeletionCallback);
+Cache::CacheItemHelper LRUCacheSecondaryCacheTest::helper_fail_(
+    LRUCacheSecondaryCacheTest::SizeCallback,
+    LRUCacheSecondaryCacheTest::SaveToCallbackFail,
+    LRUCacheSecondaryCacheTest::DeletionCallback);
 
-TEST_F(LRUSecondaryCacheTest, BasicTest) {
+TEST_F(LRUCacheSecondaryCacheTest, BasicTest) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -470,25 +473,25 @@ TEST_F(LRUSecondaryCacheTest, BasicTest) {
   Random rnd(301);
   std::string str1 = rnd.RandomString(1020);
   TestItem* item1 = new TestItem(str1.data(), str1.length());
-  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k1", item1, &LRUCacheSecondaryCacheTest::helper_,
                           str1.length()));
   std::string str2 = rnd.RandomString(1020);
   TestItem* item2 = new TestItem(str2.data(), str2.length());
   // k1 should be demoted to NVM
-  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k2", item2, &LRUCacheSecondaryCacheTest::helper_,
                           str2.length()));
 
   get_perf_context()->Reset();
   Cache::Handle* handle;
   handle =
-      cache->Lookup("k2", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                    Cache::Priority::LOW, true, stats.get());
+      cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
+                    test_item_creator, Cache::Priority::LOW, true, stats.get());
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
   // This lookup should promote k1 and demote k2
   handle =
-      cache->Lookup("k1", &LRUSecondaryCacheTest::helper_, test_item_creator,
-                    Cache::Priority::LOW, true, stats.get());
+      cache->Lookup("k1", &LRUCacheSecondaryCacheTest::helper_,
+                    test_item_creator, Cache::Priority::LOW, true, stats.get());
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
   ASSERT_EQ(secondary_cache->num_inserts(), 2u);
@ -502,7 +505,7 @@ TEST_F(LRUSecondaryCacheTest, BasicTest) {
   secondary_cache.reset();
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicFailTest) {
+TEST_F(LRUCacheSecondaryCacheTest, BasicFailTest) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -515,15 +518,15 @@ TEST_F(LRUSecondaryCacheTest, BasicFailTest) {
   auto item1 = std::make_unique<TestItem>(str1.data(), str1.length());
   ASSERT_TRUE(cache->Insert("k1", item1.get(), nullptr, str1.length())
                   .IsInvalidArgument());
-  ASSERT_OK(cache->Insert("k1", item1.get(), &LRUSecondaryCacheTest::helper_,
-                          str1.length()));
+  ASSERT_OK(cache->Insert("k1", item1.get(),
+                          &LRUCacheSecondaryCacheTest::helper_, str1.length()));
   item1.release();  // Appease clang-analyze "potential memory leak"
 
   Cache::Handle* handle;
   handle = cache->Lookup("k2", nullptr, test_item_creator, Cache::Priority::LOW,
                          true);
   ASSERT_EQ(handle, nullptr);
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, false);
   ASSERT_EQ(handle, nullptr);
 
@ -531,7 +534,7 @@ TEST_F(LRUSecondaryCacheTest, BasicFailTest) {
   secondary_cache.reset();
 }
 
-TEST_F(LRUSecondaryCacheTest, SaveFailTest) {
+TEST_F(LRUCacheSecondaryCacheTest, SaveFailTest) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -542,25 +545,25 @@ TEST_F(LRUSecondaryCacheTest, SaveFailTest) {
   Random rnd(301);
   std::string str1 = rnd.RandomString(1020);
   TestItem* item1 = new TestItem(str1.data(), str1.length());
-  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_fail_,
-                          str1.length()));
+  ASSERT_OK(cache->Insert(
+      "k1", item1, &LRUCacheSecondaryCacheTest::helper_fail_, str1.length()));
   std::string str2 = rnd.RandomString(1020);
   TestItem* item2 = new TestItem(str2.data(), str2.length());
   // k1 should be demoted to NVM
-  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_fail_,
-                          str2.length()));
+  ASSERT_OK(cache->Insert(
+      "k2", item2, &LRUCacheSecondaryCacheTest::helper_fail_, str2.length()));
 
   Cache::Handle* handle;
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_fail_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
   // This lookup should fail, since k1 demotion would have failed
-  handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_fail_,
+  handle = cache->Lookup("k1", &LRUCacheSecondaryCacheTest::helper_fail_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_EQ(handle, nullptr);
   // Since k1 didn't get promoted, k2 should still be in cache
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_fail_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
@ -571,7 +574,7 @@ TEST_F(LRUSecondaryCacheTest, SaveFailTest) {
   secondary_cache.reset();
 }
 
-TEST_F(LRUSecondaryCacheTest, CreateFailTest) {
+TEST_F(LRUCacheSecondaryCacheTest, CreateFailTest) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -582,26 +585,26 @@ TEST_F(LRUSecondaryCacheTest, CreateFailTest) {
   Random rnd(301);
   std::string str1 = rnd.RandomString(1020);
   TestItem* item1 = new TestItem(str1.data(), str1.length());
-  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k1", item1, &LRUCacheSecondaryCacheTest::helper_,
                           str1.length()));
   std::string str2 = rnd.RandomString(1020);
   TestItem* item2 = new TestItem(str2.data(), str2.length());
   // k1 should be demoted to NVM
-  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k2", item2, &LRUCacheSecondaryCacheTest::helper_,
                           str2.length()));
 
   Cache::Handle* handle;
   SetFailCreate(true);
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
   // This lookup should fail, since k1 creation would have failed
-  handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k1", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_EQ(handle, nullptr);
   // Since k1 didn't get promoted, k2 should still be in cache
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
@ -612,7 +615,7 @@ TEST_F(LRUSecondaryCacheTest, CreateFailTest) {
   secondary_cache.reset();
 }
 
-TEST_F(LRUSecondaryCacheTest, FullCapacityTest) {
+TEST_F(LRUCacheSecondaryCacheTest, FullCapacityTest) {
   LRUCacheOptions opts(1024, 0, /*_strict_capacity_limit=*/true, 0.5, nullptr,
                        kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -623,28 +626,28 @@ TEST_F(LRUSecondaryCacheTest, FullCapacityTest) {
   Random rnd(301);
   std::string str1 = rnd.RandomString(1020);
   TestItem* item1 = new TestItem(str1.data(), str1.length());
-  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k1", item1, &LRUCacheSecondaryCacheTest::helper_,
                           str1.length()));
   std::string str2 = rnd.RandomString(1020);
   TestItem* item2 = new TestItem(str2.data(), str2.length());
   // k1 should be demoted to NVM
-  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
+  ASSERT_OK(cache->Insert("k2", item2, &LRUCacheSecondaryCacheTest::helper_,
                           str2.length()));
 
   Cache::Handle* handle;
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   // k1 promotion should fail due to the block cache being at capacity,
   // but the lookup should still succeed
   Cache::Handle* handle2;
-  handle2 = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
+  handle2 = cache->Lookup("k1", &LRUCacheSecondaryCacheTest::helper_,
                           test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle2, nullptr);
   // Since k1 didn't get inserted, k2 should still be in cache
   cache->Release(handle);
   cache->Release(handle2);
-  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
+  handle = cache->Lookup("k2", &LRUCacheSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
@ -1046,7 +1049,7 @@ TEST_F(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
   Destroy(options);
 }
 
-TEST_F(LRUSecondaryCacheTest, BasicWaitAllTest) {
+TEST_F(LRUCacheSecondaryCacheTest, BasicWaitAllTest) {
   LRUCacheOptions opts(1024, 2, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
   std::shared_ptr<TestSecondaryCache> secondary_cache =
@ -1062,7 +1065,8 @@ TEST_F(LRUSecondaryCacheTest, BasicWaitAllTest) {
     values.emplace_back(str);
     TestItem* item = new TestItem(str.data(), str.length());
     ASSERT_OK(cache->Insert("k" + std::to_string(i), item,
-                            &LRUSecondaryCacheTest::helper_, str.length()));
+                            &LRUCacheSecondaryCacheTest::helper_,
+                            str.length()));
   }
   // Force all entries to be evicted to the secondary cache
   cache->SetCapacity(0);
@ -1075,9 +1079,9 @@ TEST_F(LRUSecondaryCacheTest, BasicWaitAllTest) {
       {"k5", TestSecondaryCache::ResultType::FAIL}});
   std::vector<Cache::Handle*> results;
   for (int i = 0; i < 6; ++i) {
-    results.emplace_back(
-        cache->Lookup("k" + std::to_string(i), &LRUSecondaryCacheTest::helper_,
-                      test_item_creator, Cache::Priority::LOW, false));
+    results.emplace_back(cache->Lookup(
+        "k" + std::to_string(i), &LRUCacheSecondaryCacheTest::helper_,
+        test_item_creator, Cache::Priority::LOW, false));
   }
   cache->WaitAll(results);
   for (int i = 0; i < 6; ++i) {
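BasicWaitAllTest drives the asynchronous path: lookups issued with wait = false may return pending handles, which WaitAll then resolves in one batch. A hedged sketch of that pattern follows; the parameter names stand in for the test's helper_ and test_item_creator.

// Illustrative; assumes the RocksDB Cache headers plus <string> and <vector>.
void WaitAllSketch(ROCKSDB_NAMESPACE::Cache* cache,
                   const ROCKSDB_NAMESPACE::Cache::CacheItemHelper* helper,
                   const ROCKSDB_NAMESPACE::Cache::CreateCallback& create_cb) {
  using ROCKSDB_NAMESPACE::Cache;
  std::vector<Cache::Handle*> pending;
  for (int i = 0; i < 6; ++i) {
    // wait = false: the handle may still be incomplete on return.
    pending.emplace_back(cache->Lookup("k" + std::to_string(i), helper,
                                       create_cb, Cache::Priority::LOW,
                                       /* wait */ false));
  }
  cache->WaitAll(pending);  // resolve all pending secondary-cache reads
  for (Cache::Handle* h : pending) {
    if (h != nullptr) {
      cache->Release(h);
    }
  }
}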
30 common.mk Normal file
@ -0,0 +1,30 @@
ifndef PYTHON

# Default to python3. Some distros like CentOS 8 do not have `python`.
ifeq ($(origin PYTHON), undefined)
PYTHON := $(shell which python3 || which python || echo python3)
endif
export PYTHON

endif

# To setup tmp directory, first recognize some old variables for setting
# test tmp directory or base tmp directory. TEST_TMPDIR is usually read
# by RocksDB tools through Env/FileSystem::GetTestDirectory.
ifeq ($(TEST_TMPDIR),)
TEST_TMPDIR := $(TMPD)
endif
ifeq ($(TEST_TMPDIR),)
ifeq ($(BASE_TMPDIR),)
BASE_TMPDIR :=$(TMPDIR)
endif
ifeq ($(BASE_TMPDIR),)
BASE_TMPDIR :=/tmp
endif
# Use /dev/shm if it has the sticky bit set (otherwise, /tmp or other
# base dir), and create a randomly-named rocksdb.XXXX directory therein.
TEST_TMPDIR := $(shell f=/dev/shm; test -k $$f || f=$(BASE_TMPDIR); \
  perl -le 'use File::Temp "tempdir";' \
       -e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)')
endif
export TEST_TMPDIR
@ -5,7 +5,7 @@
 # build DB_STRESS_CMD so it must exist prior.
 DB_STRESS_CMD?=./db_stress
 
-include python.mk
+include common.mk
 
 CRASHTEST_MAKE=$(MAKE) -f crash_test.mk
 CRASHTEST_PY=$(PYTHON) -u tools/db_crashtest.py --stress_cmd=$(DB_STRESS_CMD)
@ -65,10 +65,10 @@ blackbox_crash_test_with_ts: $(DB_STRESS_CMD)
 	$(CRASHTEST_PY) --enable_ts blackbox $(CRASH_TEST_EXT_ARGS)
 
 blackbox_crash_test_with_multiops_wc_txn: $(DB_STRESS_CMD)
-	$(PYTHON) -u tools/db_crashtest.py --test_multiops_txn --write_policy write_committed blackbox $(CRASH_TEST_EXT_ARGS)
+	$(CRASHTEST_PY) --test_multiops_txn --write_policy write_committed blackbox $(CRASH_TEST_EXT_ARGS)
 
 blackbox_crash_test_with_multiops_wp_txn: $(DB_STRESS_CMD)
-	$(PYTHON) -u tools/db_crashtest.py --test_multiops_txn --write_policy write_prepared blackbox $(CRASH_TEST_EXT_ARGS)
+	$(CRASHTEST_PY) --test_multiops_txn --write_policy write_prepared blackbox $(CRASH_TEST_EXT_ARGS)
 
 ifeq ($(CRASH_TEST_KILL_ODD),)
 CRASH_TEST_KILL_ODD=888887
@ -23,7 +23,7 @@ Status ArenaWrappedDBIter::GetProperty(std::string prop_name,
   if (prop_name == "rocksdb.iterator.super-version-number") {
     // First try to pass the value returned from inner iterator.
     if (!db_iter_->GetProperty(prop_name, prop).ok()) {
-      *prop = ToString(sv_number_);
+      *prop = std::to_string(sv_number_);
     }
     return Status::OK();
   }
@ -96,9 +96,9 @@ class BlobIndex {
     assert(slice.size() > 0);
     type_ = static_cast<Type>(*slice.data());
     if (type_ >= Type::kUnknown) {
-      return Status::Corruption(
-          kErrorMessage,
-          "Unknown blob index type: " + ToString(static_cast<char>(type_)));
+      return Status::Corruption(kErrorMessage,
+                                "Unknown blob index type: " +
+                                    std::to_string(static_cast<char>(type_)));
     }
     slice = Slice(slice.data() + 1, slice.size() - 1);
     if (HasTTL()) {
@ -366,19 +366,26 @@ TEST_F(DBBlobBasicTest, GetBlob_CorruptIndex) {
   Reopen(options);
 
   constexpr char key[] = "key";
+  constexpr char blob[] = "blob";
 
-  // Fake a corrupt blob index.
-  const std::string blob_index("foobar");
-
-  WriteBatch batch;
-  ASSERT_OK(WriteBatchInternal::PutBlobIndex(&batch, 0, key, blob_index));
-  ASSERT_OK(db_->Write(WriteOptions(), &batch));
+  ASSERT_OK(Put(key, blob));
 
   ASSERT_OK(Flush());
 
+  SyncPoint::GetInstance()->SetCallBack(
+      "Version::Get::TamperWithBlobIndex", [](void* arg) {
+        Slice* const blob_index = static_cast<Slice*>(arg);
+        assert(blob_index);
+        assert(!blob_index->empty());
+        blob_index->remove_prefix(1);
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
   PinnableSlice result;
   ASSERT_TRUE(db_->Get(ReadOptions(), db_->DefaultColumnFamily(), key, &result)
                   .IsCorruption());
+
+  SyncPoint::GetInstance()->DisableProcessing();
+  SyncPoint::GetInstance()->ClearAllCallBacks();
 }
 
 TEST_F(DBBlobBasicTest, MultiGetBlob_CorruptIndex) {
@ -401,17 +408,27 @@ TEST_F(DBBlobBasicTest, MultiGetBlob_CorruptIndex) {
   }
 
   constexpr char key[] = "key";
-  {
-    // Fake a corrupt blob index.
-    const std::string blob_index("foobar");
-    WriteBatch batch;
-    ASSERT_OK(WriteBatchInternal::PutBlobIndex(&batch, 0, key, blob_index));
-    ASSERT_OK(db_->Write(WriteOptions(), &batch));
-    keys[kNumOfKeys] = Slice(static_cast<const char*>(key), sizeof(key) - 1);
-  }
+  constexpr char blob[] = "blob";
+  ASSERT_OK(Put(key, blob));
+  keys[kNumOfKeys] = key;
 
   ASSERT_OK(Flush());
 
+  SyncPoint::GetInstance()->SetCallBack(
+      "Version::MultiGet::TamperWithBlobIndex", [&key](void* arg) {
+        KeyContext* const key_context = static_cast<KeyContext*>(arg);
+        assert(key_context);
+        assert(key_context->key);
+
+        if (*(key_context->key) == key) {
+          Slice* const blob_index = key_context->value;
+          assert(blob_index);
+          assert(!blob_index->empty());
+          blob_index->remove_prefix(1);
+        }
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
   std::array<PinnableSlice, kNumOfKeys + 1> values;
   std::array<Status, kNumOfKeys + 1> statuses;
   db_->MultiGet(ReadOptions(), dbfull()->DefaultColumnFamily(), kNumOfKeys + 1,
@ -425,6 +442,9 @@ TEST_F(DBBlobBasicTest, MultiGetBlob_CorruptIndex) {
     ASSERT_TRUE(statuses[i].IsCorruption());
   }
 }
+
+  SyncPoint::GetInstance()->DisableProcessing();
+  SyncPoint::GetInstance()->ClearAllCallBacks();
 }
 
 TEST_F(DBBlobBasicTest, MultiGetBlob_ExceedSoftLimit) {
|
|||||||
&live_blob_file_size));
|
&live_blob_file_size));
|
||||||
ASSERT_EQ(live_blob_file_size, total_expected_size);
|
ASSERT_EQ(live_blob_file_size, total_expected_size);
|
||||||
|
|
||||||
|
// Total amount of garbage in live blob files
|
||||||
|
{
|
||||||
|
uint64_t live_blob_file_garbage_size = 0;
|
||||||
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kLiveBlobFileGarbageSize,
|
||||||
|
&live_blob_file_garbage_size));
|
||||||
|
ASSERT_EQ(live_blob_file_garbage_size, 0);
|
||||||
|
}
|
||||||
|
|
||||||
// Total size of all blob files across all versions
|
// Total size of all blob files across all versions
|
||||||
// Note: this should be the same as above since we only have one
|
// Note: this should be the same as above since we only have one
|
||||||
// version at this point.
|
// version at this point.
|
||||||
@ -768,6 +796,14 @@ TEST_F(DBBlobBasicTest, Properties) {
|
|||||||
<< "\nBlob file space amplification: " << expected_space_amp << '\n';
|
<< "\nBlob file space amplification: " << expected_space_amp << '\n';
|
||||||
|
|
||||||
ASSERT_EQ(blob_stats, oss.str());
|
ASSERT_EQ(blob_stats, oss.str());
|
||||||
|
|
||||||
|
// Total amount of garbage in live blob files
|
||||||
|
{
|
||||||
|
uint64_t live_blob_file_garbage_size = 0;
|
||||||
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kLiveBlobFileGarbageSize,
|
||||||
|
&live_blob_file_garbage_size));
|
||||||
|
ASSERT_EQ(live_blob_file_garbage_size, expected_garbage_size);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DBBlobBasicTest, PropertiesMultiVersion) {
|
TEST_F(DBBlobBasicTest, PropertiesMultiVersion) {
|
||||||
|
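The new assertions read the kLiveBlobFileGarbageSize property that this change set introduces. Outside tests, the same value is available on any open DB; a brief hedged sketch, where `db` is an open DB pointer of ours, not code from the diff:

// `db` is an open ROCKSDB_NAMESPACE::DB* (illustrative).
uint64_t live_blob_file_garbage_size = 0;
if (db->GetIntProperty(
        ROCKSDB_NAMESPACE::DB::Properties::kLiveBlobFileGarbageSize,
        &live_blob_file_garbage_size)) {
  // Total garbage bytes across live blob files, e.g. for monitoring
  // blob-file space amplification.
}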
@ -415,16 +415,30 @@ TEST_F(DBBlobCompactionTest, CorruptedBlobIndex) {
       new ValueMutationFilter(""));
   options.compaction_filter = compaction_filter_guard.get();
   DestroyAndReopen(options);
-  // Mock a corrupted blob index
+
   constexpr char key[] = "key";
-  std::string blob_idx("blob_idx");
-  WriteBatch write_batch;
-  ASSERT_OK(WriteBatchInternal::PutBlobIndex(&write_batch, 0, key, blob_idx));
-  ASSERT_OK(db_->Write(WriteOptions(), &write_batch));
+  constexpr char blob[] = "blob";
+
+  ASSERT_OK(Put(key, blob));
   ASSERT_OK(Flush());
 
+  SyncPoint::GetInstance()->SetCallBack(
+      "CompactionIterator::InvokeFilterIfNeeded::TamperWithBlobIndex",
+      [](void* arg) {
+        Slice* const blob_index = static_cast<Slice*>(arg);
+        assert(blob_index);
+        assert(!blob_index->empty());
+        blob_index->remove_prefix(1);
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
   ASSERT_TRUE(db_->CompactRange(CompactRangeOptions(), /*begin=*/nullptr,
                                 /*end=*/nullptr)
                   .IsCorruption());
+
+  SyncPoint::GetInstance()->DisableProcessing();
+  SyncPoint::GetInstance()->ClearAllCallBacks();
+
   Close();
 }
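All three corruption tests now share the same SyncPoint scaffolding: install a callback that tampers with the blob index in flight, enable processing, run the operation, then tear everything down. The generic lifecycle, with a placeholder sync-point name:

// "SomeSyncPoint" is a placeholder; the real names appear in the hunks above.
SyncPoint::GetInstance()->SetCallBack("SomeSyncPoint", [](void* arg) {
  Slice* const blob_index = static_cast<Slice*>(arg);
  blob_index->remove_prefix(1);  // corrupt the index as it is being read
});
SyncPoint::GetInstance()->EnableProcessing();
// ... run the read / compaction expected to observe the tampered index ...
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->ClearAllCallBacks();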
@ -13,6 +13,7 @@
 #include <vector>
 
 #include "db/arena_wrapped_db_iter.h"
+#include "db/blob/blob_index.h"
 #include "db/column_family.h"
 #include "db/db_iter.h"
 #include "db/db_test_util.h"
@ -138,20 +139,39 @@ class DBBlobIndexTest : public DBTestBase {
   }
 };
 
-// Should be able to write kTypeBlobIndex to memtables and SST files.
+// Note: the following test case pertains to the StackableDB-based BlobDB
+// implementation. We should be able to write kTypeBlobIndex to memtables and
+// SST files.
 TEST_F(DBBlobIndexTest, Write) {
   for (auto tier : kAllTiers) {
     DestroyAndReopen(GetTestOptions());
-    for (int i = 1; i <= 5; i++) {
-      std::string index = ToString(i);
+
+    std::vector<std::pair<std::string, std::string>> key_values;
+
+    constexpr size_t num_key_values = 5;
+
+    key_values.reserve(num_key_values);
+
+    for (size_t i = 1; i <= num_key_values; ++i) {
+      std::string key = "key" + std::to_string(i);
+
+      std::string blob_index;
+      BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 9876543210,
+                                  "blob" + std::to_string(i));
+
+      key_values.emplace_back(std::move(key), std::move(blob_index));
+    }
+
+    for (const auto& key_value : key_values) {
       WriteBatch batch;
-      ASSERT_OK(PutBlobIndex(&batch, "key" + index, "blob" + index));
+      ASSERT_OK(PutBlobIndex(&batch, key_value.first, key_value.second));
       ASSERT_OK(Write(&batch));
     }
 
     MoveDataTo(tier);
-    for (int i = 1; i <= 5; i++) {
-      std::string index = ToString(i);
-      ASSERT_EQ("blob" + index, GetBlobIndex("key" + index));
+
+    for (const auto& key_value : key_values) {
+      ASSERT_EQ(GetBlobIndex(key_value.first), key_value.second);
     }
   }
 }
@ -164,13 +184,19 @@ TEST_F(DBBlobIndexTest, Write) {
 // accidentally opening the base DB of a stacked BlobDB and actual corruption
 // when using the integrated BlobDB.
 TEST_F(DBBlobIndexTest, Get) {
+  std::string blob_index;
+  BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 9876543210, "blob");
+
   for (auto tier : kAllTiers) {
     DestroyAndReopen(GetTestOptions());
 
     WriteBatch batch;
     ASSERT_OK(batch.Put("key", "value"));
-    ASSERT_OK(PutBlobIndex(&batch, "blob_key", "blob_index"));
+    ASSERT_OK(PutBlobIndex(&batch, "blob_key", blob_index));
     ASSERT_OK(Write(&batch));
 
     MoveDataTo(tier);
 
     // Verify normal value
     bool is_blob_index = false;
     PinnableSlice value;
@ -178,6 +204,7 @@ TEST_F(DBBlobIndexTest, Get) {
     ASSERT_EQ("value", GetImpl("key"));
     ASSERT_EQ("value", GetImpl("key", &is_blob_index));
     ASSERT_FALSE(is_blob_index);
+
     // Verify blob index
     if (tier <= kImmutableMemtables) {
       ASSERT_TRUE(Get("blob_key", &value).IsNotSupported());
@ -186,7 +213,7 @@ TEST_F(DBBlobIndexTest, Get) {
       ASSERT_TRUE(Get("blob_key", &value).IsCorruption());
       ASSERT_EQ("CORRUPTION", GetImpl("blob_key"));
     }
-    ASSERT_EQ("blob_index", GetImpl("blob_key", &is_blob_index));
+    ASSERT_EQ(blob_index, GetImpl("blob_key", &is_blob_index));
     ASSERT_TRUE(is_blob_index);
   }
 }
@@ -196,11 +223,14 @@ TEST_F(DBBlobIndexTest, Get) {
 // if blob index is updated with a normal value. See the test case above for
 // more details.
 TEST_F(DBBlobIndexTest, Updated) {
+  std::string blob_index;
+  BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 9876543210, "blob");
+
   for (auto tier : kAllTiers) {
     DestroyAndReopen(GetTestOptions());
     WriteBatch batch;
     for (int i = 0; i < 10; i++) {
-      ASSERT_OK(PutBlobIndex(&batch, "key" + ToString(i), "blob_index"));
+      ASSERT_OK(PutBlobIndex(&batch, "key" + std::to_string(i), blob_index));
     }
     ASSERT_OK(Write(&batch));
     // Avoid blob values from being purged.
@@ -218,7 +248,7 @@ TEST_F(DBBlobIndexTest, Updated) {
     ASSERT_OK(dbfull()->DeleteRange(WriteOptions(), cfh(), "key6", "key9"));
     MoveDataTo(tier);
     for (int i = 0; i < 10; i++) {
-      ASSERT_EQ("blob_index", GetBlobIndex("key" + ToString(i), snapshot));
+      ASSERT_EQ(blob_index, GetBlobIndex("key" + std::to_string(i), snapshot));
     }
     ASSERT_EQ("new_value", Get("key1"));
     if (tier <= kImmutableMemtables) {
@@ -230,9 +260,9 @@ TEST_F(DBBlobIndexTest, Updated) {
     ASSERT_EQ("NOT_FOUND", Get("key4"));
     ASSERT_EQ("a,b,c", GetImpl("key5"));
     for (int i = 6; i < 9; i++) {
-      ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
+      ASSERT_EQ("NOT_FOUND", Get("key" + std::to_string(i)));
     }
-    ASSERT_EQ("blob_index", GetBlobIndex("key9"));
+    ASSERT_EQ(blob_index, GetBlobIndex("key9"));
     dbfull()->ReleaseSnapshot(snapshot);
   }
 }
@@ -271,7 +301,7 @@ TEST_F(DBBlobIndexTest, Iterate) {
   };

   auto get_value = [&](int index, int version) {
-    return get_key(index) + "_value" + ToString(version);
+    return get_key(index) + "_value" + std::to_string(version);
   };

   auto check_iterator = [&](Iterator* iterator, Status::Code expected_status,
@@ -471,7 +501,7 @@ TEST_F(DBBlobIndexTest, IntegratedBlobIterate) {
   auto get_key = [](size_t index) { return ("key" + std::to_string(index)); };

   auto get_value = [&](size_t index, size_t version) {
-    return get_key(index) + "_value" + ToString(version);
+    return get_key(index) + "_value" + std::to_string(version);
   };

   auto check_iterator = [&](Iterator* iterator, Status expected_status,
db/builder.cc

@@ -62,9 +62,9 @@ Status BuildTable(
     FileMetaData* meta, std::vector<BlobFileAddition>* blob_file_additions,
     std::vector<SequenceNumber> snapshots,
     SequenceNumber earliest_write_conflict_snapshot,
-    SnapshotChecker* snapshot_checker, bool paranoid_file_checks,
-    InternalStats* internal_stats, IOStatus* io_status,
-    const std::shared_ptr<IOTracer>& io_tracer,
+    SequenceNumber job_snapshot, SnapshotChecker* snapshot_checker,
+    bool paranoid_file_checks, InternalStats* internal_stats,
+    IOStatus* io_status, const std::shared_ptr<IOTracer>& io_tracer,
     BlobFileCreationReason blob_creation_reason, EventLogger* event_logger,
     int job_id, const Env::IOPriority io_priority,
     TableProperties* table_properties, Env::WriteLifeTimeHint write_hint,
@@ -115,6 +115,7 @@ Status BuildTable(
   assert(fs);

   TableProperties tp;
+  bool table_file_created = false;
   if (iter->Valid() || !range_del_agg->IsEmpty()) {
     std::unique_ptr<CompactionFilter> compaction_filter;
     if (ioptions.compaction_filter_factory != nullptr &&
@@ -158,6 +159,8 @@ Status BuildTable(
                              file_checksum_func_name);
       return s;
     }
+
+    table_file_created = true;
     FileTypeSet tmp_set = ioptions.checksum_handoff_file_types;
     file->SetIOPriority(io_priority);
     file->SetWriteLifeTimeHint(write_hint);
@@ -189,12 +192,14 @@ Status BuildTable(
     CompactionIterator c_iter(
         iter, tboptions.internal_comparator.user_comparator(), &merge,
         kMaxSequenceNumber, &snapshots, earliest_write_conflict_snapshot,
-        snapshot_checker, env, ShouldReportDetailedTime(env, ioptions.stats),
+        job_snapshot, snapshot_checker, env,
+        ShouldReportDetailedTime(env, ioptions.stats),
         true /* internal key corruption is not ok */, range_del_agg.get(),
         blob_file_builder.get(), ioptions.allow_data_in_errors,
+        ioptions.enforce_single_del_contracts,
         /*compaction=*/nullptr, compaction_filter.get(),
         /*shutting_down=*/nullptr,
-        /*preserve_deletes_seqnum=*/0, /*manual_compaction_paused=*/nullptr,
+        /*manual_compaction_paused=*/nullptr,
         /*manual_compaction_canceled=*/nullptr, db_options.info_log,
         full_history_ts_low);

@@ -211,7 +216,11 @@ Status BuildTable(
         break;
       }
       builder->Add(key, value);
-      meta->UpdateBoundaries(key, value, ikey.sequence, ikey.type);
+
+      s = meta->UpdateBoundaries(key, value, ikey.sequence, ikey.type);
+      if (!s.ok()) {
+        break;
+      }

       // TODO(noetzli): Update stats after flush, too.
       if (io_priority == Env::IO_HIGH &&
@@ -366,15 +375,17 @@ Status BuildTable(

     constexpr IODebugContext* dbg = nullptr;

-    Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
-    ignored.PermitUncheckedError();
+    if (table_file_created) {
+      Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
+      ignored.PermitUncheckedError();
+    }

     assert(blob_file_additions || blob_file_paths.empty());

     if (blob_file_additions) {
       for (const std::string& blob_file_path : blob_file_paths) {
-        ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
-                               /*force_bg=*/false, /*force_fg=*/false);
+        Status ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
+                                      /*force_bg=*/false, /*force_fg=*/false);
         ignored.PermitUncheckedError();
         TEST_SYNC_POINT("BuildTable::AfterDeleteFile");
       }
|
|||||||
FileMetaData* meta, std::vector<BlobFileAddition>* blob_file_additions,
|
FileMetaData* meta, std::vector<BlobFileAddition>* blob_file_additions,
|
||||||
std::vector<SequenceNumber> snapshots,
|
std::vector<SequenceNumber> snapshots,
|
||||||
SequenceNumber earliest_write_conflict_snapshot,
|
SequenceNumber earliest_write_conflict_snapshot,
|
||||||
SnapshotChecker* snapshot_checker, bool paranoid_file_checks,
|
SequenceNumber job_snapshot, SnapshotChecker* snapshot_checker,
|
||||||
InternalStats* internal_stats, IOStatus* io_status,
|
bool paranoid_file_checks, InternalStats* internal_stats,
|
||||||
const std::shared_ptr<IOTracer>& io_tracer,
|
IOStatus* io_status, const std::shared_ptr<IOTracer>& io_tracer,
|
||||||
BlobFileCreationReason blob_creation_reason,
|
BlobFileCreationReason blob_creation_reason,
|
||||||
EventLogger* event_logger = nullptr, int job_id = 0,
|
EventLogger* event_logger = nullptr, int job_id = 0,
|
||||||
const Env::IOPriority io_priority = Env::IO_HIGH,
|
const Env::IOPriority io_priority = Env::IO_HIGH,
|
||||||
|
db/c.cc (45 changed lines)

@@ -1163,6 +1163,43 @@ void rocksdb_multi_get_cf(
     }
   }
 }

+void rocksdb_batched_multi_get_cf(rocksdb_t* db,
+                                  const rocksdb_readoptions_t* options,
+                                  rocksdb_column_family_handle_t* column_family,
+                                  size_t num_keys, const char* const* keys_list,
+                                  const size_t* keys_list_sizes,
+                                  rocksdb_pinnableslice_t** values, char** errs,
+                                  const bool sorted_input) {
+  Slice* key_slices = new Slice[num_keys];
+  PinnableSlice* value_slices = new PinnableSlice[num_keys];
+  Status* statuses = new Status[num_keys];
+  for (size_t i = 0; i < num_keys; ++i) {
+    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+  }
+
+  db->rep->MultiGet(options->rep, column_family->rep, num_keys, key_slices,
+                    value_slices, statuses, sorted_input);
+
+  for (size_t i = 0; i < num_keys; ++i) {
+    if (statuses[i].ok()) {
+      values[i] = new (rocksdb_pinnableslice_t);
+      values[i]->rep = std::move(value_slices[i]);
+      errs[i] = nullptr;
+    } else {
+      values[i] = nullptr;
+      if (!statuses[i].IsNotFound()) {
+        errs[i] = strdup(statuses[i].ToString().c_str());
+      } else {
+        errs[i] = nullptr;
+      }
+    }
+  }
+
+  delete[] key_slices;
+  delete[] value_slices;
+  delete[] statuses;
+}
+
 unsigned char rocksdb_key_may_exist(rocksdb_t* db,
                                     const rocksdb_readoptions_t* options,
                                     const char* key, size_t key_len,
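A caller-side sketch of the new entry point, following the contract implemented above: errs[i] is null on success or NotFound, a strdup'ed message otherwise; values[i] is null exactly when no value was found. The surrounding setup (db handle, read options, column family) is assumed:

#include <cstdio>
#include <cstdlib>

#include "rocksdb/c.h"

void BatchedLookup(rocksdb_t* db, const rocksdb_readoptions_t* roptions,
                   rocksdb_column_family_handle_t* cf) {
  const char* keys[2] = {"k1", "k2"};
  const size_t key_sizes[2] = {2, 2};
  rocksdb_pinnableslice_t* vals[2];
  char* errs[2];

  rocksdb_batched_multi_get_cf(db, roptions, cf, 2, keys, key_sizes, vals,
                               errs, /*sorted_input=*/false);

  for (size_t i = 0; i < 2; ++i) {
    if (errs[i] != nullptr) {
      std::fprintf(stderr, "key %zu failed: %s\n", i, errs[i]);
      std::free(errs[i]);  // allocated with strdup() above
    } else if (vals[i] != nullptr) {
      size_t len = 0;
      const char* v = rocksdb_pinnableslice_value(vals[i], &len);
      std::fwrite(v, 1, len, stdout);
      rocksdb_pinnableslice_destroy(vals[i]);
    }
    // vals[i] == nullptr && errs[i] == nullptr  =>  key not found
  }
}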
@@ -4193,6 +4230,14 @@ rocksdb_cache_t* rocksdb_cache_create_lru(size_t capacity) {
   return c;
 }

+rocksdb_cache_t* rocksdb_cache_create_lru_with_strict_capacity_limit(
+    size_t capacity) {
+  rocksdb_cache_t* c = new rocksdb_cache_t;
+  c->rep = NewLRUCache(capacity);
+  c->rep->SetStrictCapacityLimit(true);
+  return c;
+}
+
 rocksdb_cache_t* rocksdb_cache_create_lru_opts(
     rocksdb_lru_cache_options_t* opt) {
   rocksdb_cache_t* c = new rocksdb_cache_t;
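The new constructor differs from rocksdb_cache_create_lru only in flipping the strict flag: once the cache is full of pinned entries, further inserts fail rather than push usage past capacity. A hedged wiring sketch; the block-based-table plumbing is the standard C API, and handle cleanup is elided:

#include "rocksdb/c.h"

rocksdb_options_t* MakeOptionsWithBoundedCache() {
  // 64 MiB block cache that refuses to overshoot its capacity.
  rocksdb_cache_t* cache =
      rocksdb_cache_create_lru_with_strict_capacity_limit(64 << 20);

  rocksdb_block_based_table_options_t* table_opts =
      rocksdb_block_based_options_create();
  rocksdb_block_based_options_set_block_cache(table_opts, cache);

  rocksdb_options_t* opts = rocksdb_options_create();
  rocksdb_options_set_block_based_table_factory(opts, table_opts);
  return opts;  // cache/table_opts destruction elided in this sketch
}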
db/c_test.c (27 changed lines)

@@ -1260,15 +1260,18 @@ int main(int argc, char** argv) {
     rocksdb_writebatch_clear(wb);
     rocksdb_writebatch_put_cf(wb, handles[1], "bar", 3, "b", 1);
     rocksdb_writebatch_put_cf(wb, handles[1], "box", 3, "c", 1);
+    rocksdb_writebatch_put_cf(wb, handles[1], "buff", 4, "rocksdb", 7);
     rocksdb_writebatch_delete_cf(wb, handles[1], "bar", 3);
     rocksdb_write(db, woptions, wb, &err);
     CheckNoError(err);
     CheckGetCF(db, roptions, handles[1], "baz", NULL);
     CheckGetCF(db, roptions, handles[1], "bar", NULL);
     CheckGetCF(db, roptions, handles[1], "box", "c");
+    CheckGetCF(db, roptions, handles[1], "buff", "rocksdb");
     CheckPinGetCF(db, roptions, handles[1], "baz", NULL);
     CheckPinGetCF(db, roptions, handles[1], "bar", NULL);
     CheckPinGetCF(db, roptions, handles[1], "box", "c");
+    CheckPinGetCF(db, roptions, handles[1], "buff", "rocksdb");
     rocksdb_writebatch_destroy(wb);

     rocksdb_flush_wal(db, 1, &err);
@@ -1299,6 +1302,26 @@ int main(int argc, char** argv) {
       Free(&vals[i]);
     }

+    {
+      const char* batched_keys[4] = {"box", "buff", "barfooxx", "box"};
+      const size_t batched_keys_sizes[4] = {3, 4, 8, 3};
+      const char* expected_value[4] = {"c", "rocksdb", NULL, "c"};
+      char* batched_errs[4];
+
+      rocksdb_pinnableslice_t* pvals[4];
+      rocksdb_batched_multi_get_cf(db, roptions, handles[1], 4, batched_keys,
+                                   batched_keys_sizes, pvals, batched_errs,
+                                   false);
+      const char* val;
+      size_t val_len;
+      for (i = 0; i < 4; ++i) {
+        val = rocksdb_pinnableslice_value(pvals[i], &val_len);
+        CheckNoError(batched_errs[i]);
+        CheckEqual(expected_value[i], val, val_len);
+        rocksdb_pinnableslice_destroy(pvals[i]);
+      }
+    }
+
     {
       unsigned char value_found = 0;

@@ -1330,7 +1353,7 @@ int main(int argc, char** argv) {
     for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) {
       i++;
     }
-    CheckCondition(i == 3);
+    CheckCondition(i == 4);
     rocksdb_iter_get_error(iter, &err);
     CheckNoError(err);
     rocksdb_iter_destroy(iter);
@@ -1354,7 +1377,7 @@ int main(int argc, char** argv) {
     for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) {
       i++;
     }
-    CheckCondition(i == 3);
+    CheckCondition(i == 4);
     rocksdb_iter_get_error(iter, &err);
     CheckNoError(err);
     rocksdb_iter_destroy(iter);
db/column_family.cc

@@ -501,7 +501,8 @@ std::vector<std::string> ColumnFamilyData::GetDbPaths() const {
   return paths;
 }

-const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId = port::kMaxUint32;
+const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId =
+    std::numeric_limits<uint32_t>::max();

 ColumnFamilyData::ColumnFamilyData(
     uint32_t id, const std::string& name, Version* _dummy_versions,
@@ -826,8 +827,8 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
   // condition.
   // Or twice as compaction trigger, if it is smaller.
   int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
+  if (res >= std::numeric_limits<int32_t>::max()) {
+    return std::numeric_limits<int32_t>::max();
   } else {
     // res fits in int
     return static_cast<int>(res);
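The port::kMaxInt32 / kMaxUint32 / kMaxUint64 constants replaced throughout this diff are plain aliases for the standard library limits, so the substitution is mechanical and can be checked at compile time:

#include <cstdint>
#include <limits>

static_assert(std::numeric_limits<uint32_t>::max() == UINT32_MAX,
              "replacement for port::kMaxUint32 is value-identical");

// Typical use after the migration: a sentinel meaning "no limit".
constexpr uint64_t kNoSizeLimit = std::numeric_limits<uint64_t>::max();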
db/column_family.h

@@ -9,10 +9,10 @@

 #pragma once

-#include <unordered_map>
-#include <string>
-#include <vector>
 #include <atomic>
+#include <string>
+#include <unordered_map>
+#include <vector>

 #include "db/memtable_list.h"
 #include "db/table_cache.h"
@@ -25,6 +25,7 @@
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
 #include "trace_replay/block_cache_tracer.h"
+#include "util/hash_containers.h"
 #include "util/thread_local.h"

 namespace ROCKSDB_NAMESPACE {
@@ -705,8 +706,8 @@ class ColumnFamilySet {
   // * when reading, at least one condition needs to be satisfied:
   //   1. DB mutex locked
   //   2. accessed from a single-threaded write thread
-  std::unordered_map<std::string, uint32_t> column_families_;
-  std::unordered_map<uint32_t, ColumnFamilyData*> column_family_data_;
+  UnorderedMap<std::string, uint32_t> column_families_;
+  UnorderedMap<uint32_t, ColumnFamilyData*> column_family_data_;

   uint32_t max_column_family_;
   const FileOptions file_options_;
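util/hash_containers.h itself is not shown in this diff; presumably it defines UnorderedMap as an alias that can swap in a faster hash table (for example folly's F14) when one is available, falling back to std::unordered_map otherwise. A speculative sketch of that shape, which keeps the call-site change above a pure type rename:

// Speculative reconstruction of the alias; the real header may differ.
#ifdef USE_FOLLY
#include <folly/container/F14Map.h>
template <typename K, typename V>
using UnorderedMap = folly::F14FastMap<K, V>;
#else
#include <unordered_map>
template <typename K, typename V>
using UnorderedMap = std::unordered_map<K, V>;
#endif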
db/column_family_test.cc

@@ -383,7 +383,7 @@ class ColumnFamilyTestBase : public testing::Test {

   int NumTableFilesAtLevel(int level, int cf) {
     return GetProperty(cf,
-                       "rocksdb.num-files-at-level" + ToString(level));
+                       "rocksdb.num-files-at-level" + std::to_string(level));
   }

 #ifndef ROCKSDB_LITE
@@ -783,7 +783,7 @@ TEST_P(ColumnFamilyTest, BulkAddDrop) {
   std::vector<std::string> cf_names;
   std::vector<ColumnFamilyHandle*> cf_handles;
   for (int i = 1; i <= kNumCF; i++) {
-    cf_names.push_back("cf1-" + ToString(i));
+    cf_names.push_back("cf1-" + std::to_string(i));
   }
   ASSERT_OK(db_->CreateColumnFamilies(cf_options, cf_names, &cf_handles));
   for (int i = 1; i <= kNumCF; i++) {
@@ -796,7 +796,8 @@ TEST_P(ColumnFamilyTest, BulkAddDrop) {
   }
   cf_handles.clear();
   for (int i = 1; i <= kNumCF; i++) {
-    cf_descriptors.emplace_back("cf2-" + ToString(i), ColumnFamilyOptions());
+    cf_descriptors.emplace_back("cf2-" + std::to_string(i),
+                                ColumnFamilyOptions());
   }
   ASSERT_OK(db_->CreateColumnFamilies(cf_descriptors, &cf_handles));
   for (int i = 1; i <= kNumCF; i++) {
@@ -820,7 +821,7 @@ TEST_P(ColumnFamilyTest, DropTest) {
     Open({"default"});
     CreateColumnFamiliesAndReopen({"pikachu"});
     for (int i = 0; i < 100; ++i) {
-      ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i)));
+      ASSERT_OK(Put(1, std::to_string(i), "bar" + std::to_string(i)));
     }
     ASSERT_OK(Flush(1));

@@ -1344,7 +1345,7 @@ TEST_P(ColumnFamilyTest, DifferentCompactionStyles) {
     PutRandomData(1, 10, 12000);
     PutRandomData(1, 1, 10);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }

   // SETUP column family "two" -- level style with 4 levels
@@ -1352,7 +1353,7 @@ TEST_P(ColumnFamilyTest, DifferentCompactionStyles) {
     PutRandomData(2, 10, 12000);
     PutRandomData(2, 1, 10);
     WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
+    AssertFilesPerLevel(std::to_string(i + 1), 2);
   }

   // TRIGGER compaction "one"
@@ -1416,7 +1417,7 @@ TEST_P(ColumnFamilyTest, MultipleManualCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }
   bool cf_1_1 = true;
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
@@ -1446,7 +1447,7 @@ TEST_P(ColumnFamilyTest, MultipleManualCompactions) {
     PutRandomData(2, 10, 12000);
     PutRandomData(2, 1, 10);
     WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
+    AssertFilesPerLevel(std::to_string(i + 1), 2);
   }
   threads.emplace_back([&] {
     TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:1");
@@ -1533,7 +1534,7 @@ TEST_P(ColumnFamilyTest, AutomaticAndManualCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }

   TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1");
@@ -1543,7 +1544,7 @@ TEST_P(ColumnFamilyTest, AutomaticAndManualCompactions) {
     PutRandomData(2, 10, 12000);
     PutRandomData(2, 1, 10);
     WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
+    AssertFilesPerLevel(std::to_string(i + 1), 2);
   }
   ROCKSDB_NAMESPACE::port::Thread threads([&] {
     CompactRangeOptions compact_options;
@@ -1615,7 +1616,7 @@ TEST_P(ColumnFamilyTest, ManualAndAutomaticCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }
   bool cf_1_1 = true;
   bool cf_1_2 = true;
@@ -1650,7 +1651,7 @@ TEST_P(ColumnFamilyTest, ManualAndAutomaticCompactions) {
     PutRandomData(2, 10, 12000);
     PutRandomData(2, 1, 10);
     WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
+    AssertFilesPerLevel(std::to_string(i + 1), 2);
   }
   TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5");
   threads.join();
@@ -1709,7 +1710,7 @@ TEST_P(ColumnFamilyTest, SameCFManualManualCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }
   bool cf_1_1 = true;
   bool cf_1_2 = true;
@@ -1748,8 +1749,8 @@ TEST_P(ColumnFamilyTest, SameCFManualManualCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
+    AssertFilesPerLevel(
+        std::to_string(one.level0_file_num_compaction_trigger + i), 1);
   }

   ROCKSDB_NAMESPACE::port::Thread threads1([&] {
@@ -1811,7 +1812,7 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }
   bool cf_1_1 = true;
   bool cf_1_2 = true;
@@ -1849,8 +1850,8 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
+    AssertFilesPerLevel(
+        std::to_string(one.level0_file_num_compaction_trigger + i), 1);
   }

   TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1");
@@ -1904,7 +1905,7 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }
   bool cf_1_1 = true;
   bool cf_1_2 = true;
@@ -1942,8 +1943,8 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
+    AssertFilesPerLevel(
+        std::to_string(one.level0_file_num_compaction_trigger + i), 1);
   }

   TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1");
@@ -2024,7 +2025,7 @@ TEST_P(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
     PutRandomData(1, 10, 12000, true);
     PutRandomData(1, 1, 10, true);
     WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
+    AssertFilesPerLevel(std::to_string(i + 1), 1);
   }

   TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5");
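Every hunk in this file is the same one-for-one substitution: RocksDB's homegrown ToString() helper, a relic of toolchains without a usable std::to_string, gives way to the standard function. The shape of each call site after the change:

#include <string>

std::string LevelProperty(int level) {
  // Identical output to the old ToString(level)-based concatenation.
  return "rocksdb.num-files-at-level" + std::to_string(level);
}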
db/compact_files_test.cc

@@ -91,8 +91,8 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
   // create couple files
   // Background compaction starts and waits in BackgroundCallCompaction:0
   for (int i = 0; i < kLevel0Trigger * 4; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i), ""));
-    ASSERT_OK(db->Put(WriteOptions(), ToString(100 - i), ""));
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), ""));
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(100 - i), ""));
     ASSERT_OK(db->Flush(FlushOptions()));
   }

@@ -136,7 +136,7 @@ TEST_F(CompactFilesTest, MultipleLevel) {
   // create couple files in L0, L3, L4 and L5
   for (int i = 5; i > 2; --i) {
     collector->ClearFlushedFiles();
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i), ""));
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), ""));
     ASSERT_OK(db->Flush(FlushOptions()));
     // Ensure background work is fully finished including listener callbacks
     // before accessing listener state.
@@ -145,11 +145,11 @@ TEST_F(CompactFilesTest, MultipleLevel) {
     ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, i));

     std::string prop;
-    ASSERT_TRUE(
-        db->GetProperty("rocksdb.num-files-at-level" + ToString(i), &prop));
+    ASSERT_TRUE(db->GetProperty(
+        "rocksdb.num-files-at-level" + std::to_string(i), &prop));
     ASSERT_EQ("1", prop);
   }
-  ASSERT_OK(db->Put(WriteOptions(), ToString(0), ""));
+  ASSERT_OK(db->Put(WriteOptions(), std::to_string(0), ""));
   ASSERT_OK(db->Flush(FlushOptions()));

   ColumnFamilyMetaData meta;
@@ -218,7 +218,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {

   // create couple files
   for (int i = 1000; i < 2000; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i),
                       std::string(kWriteBufferSize / 10, 'a' + (i % 26))));
   }

@@ -257,14 +257,14 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i),
                       std::string(1000, 'a' + (i % 26))));
   }
   ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_1 = collector->GetFlushedFiles();
   collector->ClearFlushedFiles();
   for (int i = 0; i < 500; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i),
                       std::string(1000, 'a' + (i % 26))));
   }
   ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
@@ -295,7 +295,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

   // Create 5 files.
   for (int i = 0; i < 5; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), "key" + ToString(i), "value"));
+    ASSERT_OK(db->Put(WriteOptions(), "key" + std::to_string(i), "value"));
     ASSERT_OK(db->Flush(FlushOptions()));
   }

@@ -465,7 +465,7 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+    ASSERT_OK(db->Put(WriteOptions(), std::to_string(i),
                       std::string(1000, 'a' + (i % 26))));
   }
   ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
db/compaction/compaction.cc

@@ -518,7 +518,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
     }
   }

-  if (max_output_file_size_ != port::kMaxUint64 &&
+  if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
      (immutable_options_.compaction_style == kCompactionStyleLevel ||
       output_level() > 0)) {
    preallocation_size = std::min(max_output_file_size_, preallocation_size);
@@ -616,7 +616,7 @@ bool Compaction::DoesInputReferenceBlobFiles() const {

 uint64_t Compaction::MinInputFileOldestAncesterTime(
     const InternalKey* start, const InternalKey* end) const {
-  uint64_t min_oldest_ancester_time = port::kMaxUint64;
+  uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
   const InternalKeyComparator& icmp =
       column_family_data()->internal_comparator();
   for (const auto& level_files : inputs_) {
db/compaction/compaction_iterator.cc

@@ -24,40 +24,39 @@ CompactionIterator::CompactionIterator(
     InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
     SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
     SequenceNumber earliest_write_conflict_snapshot,
-    const SnapshotChecker* snapshot_checker, Env* env,
-    bool report_detailed_time, bool expect_valid_internal_key,
+    SequenceNumber job_snapshot, const SnapshotChecker* snapshot_checker,
+    Env* env, bool report_detailed_time, bool expect_valid_internal_key,
     CompactionRangeDelAggregator* range_del_agg,
     BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
-    const Compaction* compaction, const CompactionFilter* compaction_filter,
+    bool enforce_single_del_contracts, const Compaction* compaction,
+    const CompactionFilter* compaction_filter,
     const std::atomic<bool>* shutting_down,
-    const SequenceNumber preserve_deletes_seqnum,
     const std::atomic<int>* manual_compaction_paused,
     const std::atomic<bool>* manual_compaction_canceled,
     const std::shared_ptr<Logger> info_log,
     const std::string* full_history_ts_low)
     : CompactionIterator(
           input, cmp, merge_helper, last_sequence, snapshots,
-          earliest_write_conflict_snapshot, snapshot_checker, env,
+          earliest_write_conflict_snapshot, job_snapshot, snapshot_checker, env,
           report_detailed_time, expect_valid_internal_key, range_del_agg,
-          blob_file_builder, allow_data_in_errors,
+          blob_file_builder, allow_data_in_errors, enforce_single_del_contracts,
          std::unique_ptr<CompactionProxy>(
               compaction ? new RealCompaction(compaction) : nullptr),
-          compaction_filter, shutting_down, preserve_deletes_seqnum,
-          manual_compaction_paused, manual_compaction_canceled, info_log,
-          full_history_ts_low) {}
+          compaction_filter, shutting_down, manual_compaction_paused,
+          manual_compaction_canceled, info_log, full_history_ts_low) {}

 CompactionIterator::CompactionIterator(
     InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
     SequenceNumber /*last_sequence*/, std::vector<SequenceNumber>* snapshots,
     SequenceNumber earliest_write_conflict_snapshot,
-    const SnapshotChecker* snapshot_checker, Env* env,
-    bool report_detailed_time, bool expect_valid_internal_key,
+    SequenceNumber job_snapshot, const SnapshotChecker* snapshot_checker,
+    Env* env, bool report_detailed_time, bool expect_valid_internal_key,
     CompactionRangeDelAggregator* range_del_agg,
     BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
+    bool enforce_single_del_contracts,
     std::unique_ptr<CompactionProxy> compaction,
     const CompactionFilter* compaction_filter,
     const std::atomic<bool>* shutting_down,
-    const SequenceNumber preserve_deletes_seqnum,
     const std::atomic<int>* manual_compaction_paused,
     const std::atomic<bool>* manual_compaction_canceled,
     const std::shared_ptr<Logger> info_log,
@@ -68,6 +67,7 @@ CompactionIterator::CompactionIterator(
       merge_helper_(merge_helper),
       snapshots_(snapshots),
       earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
+      job_snapshot_(job_snapshot),
       snapshot_checker_(snapshot_checker),
       env_(env),
       clock_(env_->GetSystemClock().get()),
@@ -80,9 +80,9 @@ CompactionIterator::CompactionIterator(
       shutting_down_(shutting_down),
       manual_compaction_paused_(manual_compaction_paused),
       manual_compaction_canceled_(manual_compaction_canceled),
-      preserve_deletes_seqnum_(preserve_deletes_seqnum),
       info_log_(info_log),
       allow_data_in_errors_(allow_data_in_errors),
+      enforce_single_del_contracts_(enforce_single_del_contracts),
       timestamp_size_(cmp_ ? cmp_->timestamp_size() : 0),
       full_history_ts_low_(full_history_ts_low),
       current_user_key_sequence_(0),
@@ -237,6 +237,10 @@ bool CompactionIterator::InvokeFilterIfNeeded(bool* need_skip,
     return false;
   }

+  TEST_SYNC_POINT_CALLBACK(
+      "CompactionIterator::InvokeFilterIfNeeded::TamperWithBlobIndex",
+      &value_);
+
   // For integrated BlobDB impl, CompactionIterator reads blob value.
   // For Stacked BlobDB impl, the corresponding CompactionFilter's
   // FilterV2 method should read the blob value.
@@ -306,6 +310,14 @@ bool CompactionIterator::InvokeFilterIfNeeded(bool* need_skip,
       // no value associated with delete
       value_.clear();
       iter_stats_.num_record_drop_user++;
+    } else if (filter == CompactionFilter::Decision::kPurge) {
+      // convert the current key to a single delete; key_ is pointing into
+      // current_key_ at this point, so updating current_key_ updates key()
+      ikey_.type = kTypeSingleDeletion;
+      current_key_.UpdateInternalKey(ikey_.sequence, kTypeSingleDeletion);
+      // no value associated with single delete
+      value_.clear();
+      iter_stats_.num_record_drop_user++;
     } else if (filter == CompactionFilter::Decision::kChangeValue) {
       if (ikey_.type == kTypeBlobIndex) {
         // value transfer from blob file to inlined data
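The new branch handles a compaction filter returning Decision::kPurge (a decision value introduced alongside this diff) by rewriting the current entry as a SingleDelete in place. A hedged sketch of a filter that would exercise it; the key prefix and the assumption that such keys were written exactly once (SingleDelete's usual contract) belong to the example, not the source:

#include <string>

#include "rocksdb/compaction_filter.h"

class PurgeTempKeysFilter : public ROCKSDB_NAMESPACE::CompactionFilter {
 public:
  Decision FilterV2(int /*level*/, const ROCKSDB_NAMESPACE::Slice& key,
                    ValueType /*value_type*/,
                    const ROCKSDB_NAMESPACE::Slice& /*existing_value*/,
                    std::string* /*new_value*/,
                    std::string* /*skip_until*/) const override {
    if (key.starts_with("tmp_")) {
      return Decision::kPurge;  // compaction emits a SingleDelete instead
    }
    return Decision::kKeep;
  }

  const char* Name() const override { return "PurgeTempKeysFilter"; }
};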
@@ -624,24 +636,39 @@ void CompactionIterator::NextFromInput() {

         TEST_SYNC_POINT_CALLBACK(
             "CompactionIterator::NextFromInput:SingleDelete:2", nullptr);
-        if (next_ikey.type == kTypeSingleDeletion ||
-            next_ikey.type == kTypeDeletion) {
+        if (next_ikey.type == kTypeSingleDeletion) {
           // We encountered two SingleDeletes for same key in a row. This
           // could be due to unexpected user input. If write-(un)prepared
           // transaction is used, this could also be due to releasing an old
           // snapshot between a Put and its matching SingleDelete.
-          // Furthermore, if write-(un)prepared transaction is rolled back
-          // after prepare, we will write a Delete to cancel a prior Put. If
-          // old snapshot is released between a later Put and its matching
-          // SingleDelete, we will end up with a Delete followed by
-          // SingleDelete.
           // Skip the first SingleDelete and let the next iteration decide
-          // how to handle the second SingleDelete or Delete.
+          // how to handle the second SingleDelete.

           // First SingleDelete has been skipped since we already called
           // input_.Next().
           ++iter_stats_.num_record_drop_obsolete;
           ++iter_stats_.num_single_del_mismatch;
+        } else if (next_ikey.type == kTypeDeletion) {
+          std::ostringstream oss;
+          oss << "Found SD and type: " << static_cast<int>(next_ikey.type)
+              << " on the same key, violating the contract "
+                 "of SingleDelete. Check your application to make sure the "
+                 "application does not mix SingleDelete and Delete for "
+                 "the same key. If you are using "
+                 "write-prepared/write-unprepared transactions, and use "
+                 "SingleDelete to delete certain keys, then make sure "
+                 "TransactionDBOptions::rollback_deletion_type_callback is "
+                 "configured properly. Mixing SD and DEL can lead to "
+                 "undefined behaviors";
+          ++iter_stats_.num_record_drop_obsolete;
+          ++iter_stats_.num_single_del_mismatch;
+          if (enforce_single_del_contracts_) {
+            ROCKS_LOG_ERROR(info_log_, "%s", oss.str().c_str());
+            valid_ = false;
+            status_ = Status::Corruption(oss.str());
+            return;
+          }
+          ROCKS_LOG_WARN(info_log_, "%s", oss.str().c_str());
         } else if (!is_timestamp_eligible_for_gc) {
           // We cannot drop the SingleDelete as timestamp is enabled, and
           // timestamp of this key is greater than or equal to
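The split above separates two cases the old code lumped together: SingleDelete followed by SingleDelete stays a counted mismatch, while SingleDelete followed by Delete on the same key is now a contract violation that can fail the compaction. A sketch of the write pattern that trips it, assuming enforce_single_del_contracts (introduced in this diff) is left enabled:

#include "rocksdb/db.h"

ROCKSDB_NAMESPACE::Status MixedTombstones(ROCKSDB_NAMESPACE::DB* db) {
  using ROCKSDB_NAMESPACE::WriteOptions;
  ROCKSDB_NAMESPACE::Status s = db->Put(WriteOptions(), "key", "v");
  if (s.ok()) s = db->Delete(WriteOptions(), "key");        // DEL tombstone
  if (s.ok()) s = db->SingleDelete(WriteOptions(), "key");  // SD on same key
  // Once flush + compaction bring the SD and DEL together, the job now
  // returns Status::Corruption instead of silently dropping records.
  return s;
}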
@@ -758,7 +785,6 @@ void CompactionIterator::NextFromInput() {
                 (ikey_.type == kTypeDeletionWithTimestamp &&
                  cmp_with_history_ts_low_ < 0)) &&
                DefinitelyInSnapshot(ikey_.sequence, earliest_snapshot_) &&
-               ikeyNotNeededForIncrementalSnapshot() &&
                compaction_->KeyNotExistsBeyondOutputLevel(ikey_.user_key,
                                                           &level_ptrs_)) {
       // TODO(noetzli): This is the only place where we use compaction_
@@ -792,7 +818,7 @@ void CompactionIterator::NextFromInput() {
     } else if ((ikey_.type == kTypeDeletion ||
                 (ikey_.type == kTypeDeletionWithTimestamp &&
                  cmp_with_history_ts_low_ < 0)) &&
-               bottommost_level_ && ikeyNotNeededForIncrementalSnapshot()) {
+               bottommost_level_) {
       // Handle the case where we have a delete key at the bottom most level
       // We can skip outputting the key iff there are no subsequent puts for this
       // key
@@ -954,6 +980,10 @@ void CompactionIterator::GarbageCollectBlobIfNeeded() {

   // GC for integrated BlobDB
   if (compaction_->enable_blob_garbage_collection()) {
+    TEST_SYNC_POINT_CALLBACK(
+        "CompactionIterator::GarbageCollectBlobIfNeeded::TamperWithBlobIndex",
+        &value_);
+
     BlobIndex blob_index;

     {
@@ -1060,10 +1090,9 @@ void CompactionIterator::PrepareOutput() {
     // Can we do the same for levels above bottom level as long as
     // KeyNotExistsBeyondOutputLevel() return true?
     if (valid_ && compaction_ != nullptr &&
-        !compaction_->allow_ingest_behind() &&
-        ikeyNotNeededForIncrementalSnapshot() && bottommost_level_ &&
+        !compaction_->allow_ingest_behind() && bottommost_level_ &&
         DefinitelyInSnapshot(ikey_.sequence, earliest_snapshot_) &&
-        ikey_.type != kTypeMerge) {
+        ikey_.type != kTypeMerge && current_key_committed_) {
       assert(ikey_.type != kTypeDeletion);
       assert(ikey_.type != kTypeSingleDeletion ||
              (timestamp_size_ || full_history_ts_low_));
@@ -1139,13 +1168,6 @@ inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
   return kMaxSequenceNumber;
 }

-// used in 2 places - prevents deletion markers to be dropped if they may be
-// needed and disables seqnum zero-out in PrepareOutput for recent keys.
-inline bool CompactionIterator::ikeyNotNeededForIncrementalSnapshot() {
-  return (!compaction_->preserve_deletes()) ||
-         (ikey_.sequence < preserve_deletes_seqnum_);
-}
-
 uint64_t CompactionIterator::ComputeBlobGarbageCollectionCutoffFileNumber(
     const CompactionProxy* compaction) {
   if (!compaction) {
@ -92,8 +92,6 @@ class CompactionIterator {
|
|||||||
|
|
||||||
virtual bool allow_ingest_behind() const = 0;
|
virtual bool allow_ingest_behind() const = 0;
|
||||||
|
|
||||||
virtual bool preserve_deletes() const = 0;
|
|
||||||
|
|
||||||
virtual bool allow_mmap_reads() const = 0;
|
virtual bool allow_mmap_reads() const = 0;
|
||||||
|
|
||||||
virtual bool enable_blob_garbage_collection() const = 0;
|
virtual bool enable_blob_garbage_collection() const = 0;
|
||||||
@ -139,8 +137,6 @@ class CompactionIterator {
|
|||||||
return compaction_->immutable_options()->allow_ingest_behind;
|
return compaction_->immutable_options()->allow_ingest_behind;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool preserve_deletes() const override { return false; }
|
|
||||||
|
|
||||||
bool allow_mmap_reads() const override {
|
bool allow_mmap_reads() const override {
|
||||||
return compaction_->immutable_options()->allow_mmap_reads;
|
return compaction_->immutable_options()->allow_mmap_reads;
|
||||||
}
|
}
|
||||||
@ -176,14 +172,13 @@ class CompactionIterator {
|
|||||||
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
|
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
|
||||||
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
|
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
|
||||||
SequenceNumber earliest_write_conflict_snapshot,
|
SequenceNumber earliest_write_conflict_snapshot,
|
||||||
const SnapshotChecker* snapshot_checker, Env* env,
|
SequenceNumber job_snapshot, const SnapshotChecker* snapshot_checker,
|
||||||
bool report_detailed_time, bool expect_valid_internal_key,
|
Env* env, bool report_detailed_time, bool expect_valid_internal_key,
|
||||||
CompactionRangeDelAggregator* range_del_agg,
|
CompactionRangeDelAggregator* range_del_agg,
|
||||||
BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
|
BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
|
||||||
const Compaction* compaction = nullptr,
|
bool enforce_single_del_contracts, const Compaction* compaction = nullptr,
|
||||||
const CompactionFilter* compaction_filter = nullptr,
|
const CompactionFilter* compaction_filter = nullptr,
|
||||||
const std::atomic<bool>* shutting_down = nullptr,
|
const std::atomic<bool>* shutting_down = nullptr,
|
||||||
const SequenceNumber preserve_deletes_seqnum = 0,
|
|
||||||
const std::atomic<int>* manual_compaction_paused = nullptr,
|
const std::atomic<int>* manual_compaction_paused = nullptr,
|
||||||
const std::atomic<bool>* manual_compaction_canceled = nullptr,
|
const std::atomic<bool>* manual_compaction_canceled = nullptr,
|
||||||
const std::shared_ptr<Logger> info_log = nullptr,
|
const std::shared_ptr<Logger> info_log = nullptr,
|
||||||
@ -194,14 +189,14 @@ class CompactionIterator {
|
|||||||
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
|
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
|
||||||
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
|
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
|
||||||
SequenceNumber earliest_write_conflict_snapshot,
|
SequenceNumber earliest_write_conflict_snapshot,
|
||||||
const SnapshotChecker* snapshot_checker, Env* env,
|
SequenceNumber job_snapshot, const SnapshotChecker* snapshot_checker,
|
||||||
bool report_detailed_time, bool expect_valid_internal_key,
|
Env* env, bool report_detailed_time, bool expect_valid_internal_key,
|
||||||
CompactionRangeDelAggregator* range_del_agg,
|
CompactionRangeDelAggregator* range_del_agg,
|
||||||
BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
|
BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
|
||||||
|
bool enforce_single_del_contracts,
|
||||||
std::unique_ptr<CompactionProxy> compaction,
|
std::unique_ptr<CompactionProxy> compaction,
|
||||||
const CompactionFilter* compaction_filter = nullptr,
|
const CompactionFilter* compaction_filter = nullptr,
|
||||||
const std::atomic<bool>* shutting_down = nullptr,
|
const std::atomic<bool>* shutting_down = nullptr,
|
||||||
const SequenceNumber preserve_deletes_seqnum = 0,
|
|
||||||
const std::atomic<int>* manual_compaction_paused = nullptr,
|
const std::atomic<int>* manual_compaction_paused = nullptr,
|
||||||
const std::atomic<bool>* manual_compaction_canceled = nullptr,
|
const std::atomic<bool>* manual_compaction_canceled = nullptr,
|
||||||
const std::shared_ptr<Logger> info_log = nullptr,
|
const std::shared_ptr<Logger> info_log = nullptr,
|
||||||
@ -272,14 +267,9 @@ class CompactionIterator {
|
|||||||
inline SequenceNumber findEarliestVisibleSnapshot(
|
inline SequenceNumber findEarliestVisibleSnapshot(
|
||||||
SequenceNumber in, SequenceNumber* prev_snapshot);
|
SequenceNumber in, SequenceNumber* prev_snapshot);
|
||||||
|
|
||||||
// Checks whether the currently seen ikey_ is needed for
|
|
||||||
// incremental (differential) snapshot and hence can't be dropped
|
|
||||||
// or seqnum be zero-ed out even if all other conditions for it are met.
|
|
||||||
inline bool ikeyNotNeededForIncrementalSnapshot();
|
|
||||||
|
|
||||||
inline bool KeyCommitted(SequenceNumber sequence) {
|
inline bool KeyCommitted(SequenceNumber sequence) {
|
||||||
return snapshot_checker_ == nullptr ||
|
return snapshot_checker_ == nullptr ||
|
||||||
snapshot_checker_->CheckInSnapshot(sequence, kMaxSequenceNumber) ==
|
snapshot_checker_->CheckInSnapshot(sequence, job_snapshot_) ==
|
||||||
SnapshotCheckerResult::kInSnapshot;
|
SnapshotCheckerResult::kInSnapshot;
|
||||||
}
|
}
|
||||||
|
|
||||||
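The KeyCommitted() hunk above is the behavioral core of the job_snapshot plumbing: visibility is now checked against the snapshot the compaction job took when it started, rather than against kMaxSequenceNumber (i.e. "committed at any point"). A minimal sketch of the resulting check, using a hypothetical stand-in for the SnapshotChecker interface:

#include <cstdint>

using SequenceNumber = uint64_t;

enum class SnapshotCheckerResult { kInSnapshot, kNotInSnapshot };

// Hypothetical stand-in for the real SnapshotChecker interface.
struct SnapshotChecker {
  virtual ~SnapshotChecker() = default;
  virtual SnapshotCheckerResult CheckInSnapshot(
      SequenceNumber seq, SequenceNumber snapshot) const = 0;
};

// Before this change the second argument was kMaxSequenceNumber, so any
// committed entry passed; with job_snapshot, entries that commit after the
// compaction job started are no longer treated as committed.
bool KeyCommitted(const SnapshotChecker* snapshot_checker,
                  SequenceNumber sequence, SequenceNumber job_snapshot) {
  return snapshot_checker == nullptr ||
         snapshot_checker->CheckInSnapshot(sequence, job_snapshot) ==
             SnapshotCheckerResult::kInSnapshot;
}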
@@ -320,6 +310,7 @@ class CompactionIterator {
   std::unordered_set<SequenceNumber> released_snapshots_;
   std::vector<SequenceNumber>::const_iterator earliest_snapshot_iter_;
   const SequenceNumber earliest_write_conflict_snapshot_;
+  const SequenceNumber job_snapshot_;
   const SnapshotChecker* const snapshot_checker_;
   Env* env_;
   SystemClock* clock_;

@@ -332,7 +323,6 @@ class CompactionIterator {
   const std::atomic<bool>* shutting_down_;
   const std::atomic<int>* manual_compaction_paused_;
   const std::atomic<bool>* manual_compaction_canceled_;
-  const SequenceNumber preserve_deletes_seqnum_;
   bool bottommost_level_;
   bool valid_ = false;
   bool visible_at_tip_;

@@ -343,6 +333,8 @@ class CompactionIterator {
 
   bool allow_data_in_errors_;
 
+  const bool enforce_single_del_contracts_;
+
   // Comes from comparator.
   const size_t timestamp_size_;
 
@@ -166,8 +166,6 @@ class FakeCompaction : public CompactionIterator::CompactionProxy {
 
   bool allow_ingest_behind() const override { return is_allow_ingest_behind; }
 
-  bool preserve_deletes() const override { return false; }
-
   bool allow_mmap_reads() const override { return false; }
 
   bool enable_blob_garbage_collection() const override { return false; }

@@ -277,11 +275,12 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
     iter_->SeekToFirst();
     c_iter_.reset(new CompactionIterator(
         iter_.get(), cmp_, merge_helper_.get(), last_sequence, &snapshots_,
-        earliest_write_conflict_snapshot, snapshot_checker_.get(),
-        Env::Default(), false /* report_detailed_time */, false,
-        range_del_agg_.get(), nullptr /* blob_file_builder */,
-        true /*allow_data_in_errors*/, std::move(compaction), filter,
-        &shutting_down_, /*preserve_deletes_seqnum=*/0,
+        earliest_write_conflict_snapshot, kMaxSequenceNumber,
+        snapshot_checker_.get(), Env::Default(),
+        false /* report_detailed_time */, false, range_del_agg_.get(),
+        nullptr /* blob_file_builder */, true /*allow_data_in_errors*/,
+        true /*enforce_single_del_contracts*/, std::move(compaction), filter,
+        &shutting_down_,
         /*manual_compaction_paused=*/nullptr,
         /*manual_compaction_canceled=*/nullptr, /*info_log=*/nullptr,
         full_history_ts_low));

@@ -315,7 +314,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
         key_not_exists_beyond_output_level, full_history_ts_low);
     c_iter_->SeekToFirst();
     for (size_t i = 0; i < expected_keys.size(); i++) {
-      std::string info = "i = " + ToString(i);
+      std::string info = "i = " + std::to_string(i);
      ASSERT_TRUE(c_iter_->Valid()) << info;
      ASSERT_OK(c_iter_->status()) << info;
      ASSERT_EQ(expected_keys[i], c_iter_->key().ToString()) << info;
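This hunk is the first of a long sweep in this range that retires RocksDB's pre-C++11 ToString() helper in favor of std::to_string from the standard library; the same one-for-one substitution repeats through the test and compaction-picker hunks below. The equivalence, as a sketch:

#include <string>

// ROCKSDB_NAMESPACE::ToString(i) and std::to_string(i) produce the same
// text; the standard function simply removes the need for the shim.
std::string info = "i = " + std::to_string(42);  // yields "i = 42"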
@@ -417,16 +417,17 @@ CompactionJob::CompactionJob(
     int job_id, Compaction* compaction, const ImmutableDBOptions& db_options,
     const MutableDBOptions& mutable_db_options, const FileOptions& file_options,
     VersionSet* versions, const std::atomic<bool>* shutting_down,
-    const SequenceNumber preserve_deletes_seqnum, LogBuffer* log_buffer,
-    FSDirectory* db_directory, FSDirectory* output_directory,
-    FSDirectory* blob_output_directory, Statistics* stats,
-    InstrumentedMutex* db_mutex, ErrorHandler* db_error_handler,
+    LogBuffer* log_buffer, FSDirectory* db_directory,
+    FSDirectory* output_directory, FSDirectory* blob_output_directory,
+    Statistics* stats, InstrumentedMutex* db_mutex,
+    ErrorHandler* db_error_handler,
     std::vector<SequenceNumber> existing_snapshots,
     SequenceNumber earliest_write_conflict_snapshot,
-    const SnapshotChecker* snapshot_checker, std::shared_ptr<Cache> table_cache,
-    EventLogger* event_logger, bool paranoid_file_checks, bool measure_io_stats,
-    const std::string& dbname, CompactionJobStats* compaction_job_stats,
-    Env::Priority thread_pri, const std::shared_ptr<IOTracer>& io_tracer,
+    const SnapshotChecker* snapshot_checker, JobContext* job_context,
+    std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
+    bool paranoid_file_checks, bool measure_io_stats, const std::string& dbname,
+    CompactionJobStats* compaction_job_stats, Env::Priority thread_pri,
+    const std::shared_ptr<IOTracer>& io_tracer,
     const std::atomic<int>* manual_compaction_paused,
     const std::atomic<bool>* manual_compaction_canceled,
     const std::string& db_id, const std::string& db_session_id,

@@ -456,7 +457,6 @@ CompactionJob::CompactionJob(
       shutting_down_(shutting_down),
       manual_compaction_paused_(manual_compaction_paused),
       manual_compaction_canceled_(manual_compaction_canceled),
-      preserve_deletes_seqnum_(preserve_deletes_seqnum),
       db_directory_(db_directory),
       blob_output_directory_(blob_output_directory),
       db_mutex_(db_mutex),

@@ -464,6 +464,7 @@ CompactionJob::CompactionJob(
       existing_snapshots_(std::move(existing_snapshots)),
       earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
       snapshot_checker_(snapshot_checker),
+      job_context_(job_context),
       table_cache_(std::move(table_cache)),
       event_logger_(event_logger),
       paranoid_file_checks_(paranoid_file_checks),

@@ -1252,7 +1253,7 @@ void CompactionJob::NotifyOnSubcompactionBegin(
   if (shutting_down_->load(std::memory_order_acquire)) {
     return;
   }
-  if (c->is_manual_compaction() &&
+  if (c->is_manual_compaction() && manual_compaction_paused_ &&
       manual_compaction_paused_->load(std::memory_order_acquire) > 0) {
     return;
   }
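The NotifyOnSubcompactionBegin() hunk adds a null check before dereferencing manual_compaction_paused_, which can legitimately be nullptr (the remote-compaction constructor shown later passes nullptr for it). A minimal sketch of the guard, assuming only the atomic type:

#include <atomic>

// && short-circuits, so the load never happens through a null pointer.
bool ManualCompactionPaused(const std::atomic<int>* manual_compaction_paused) {
  return manual_compaction_paused != nullptr &&
         manual_compaction_paused->load(std::memory_order_acquire) > 0;
}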
@ -1469,14 +1470,17 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
|
|||||||
Status status;
|
Status status;
|
||||||
const std::string* const full_history_ts_low =
|
const std::string* const full_history_ts_low =
|
||||||
full_history_ts_low_.empty() ? nullptr : &full_history_ts_low_;
|
full_history_ts_low_.empty() ? nullptr : &full_history_ts_low_;
|
||||||
|
const SequenceNumber job_snapshot_seq =
|
||||||
|
job_context_ ? job_context_->GetJobSnapshotSequence()
|
||||||
|
: kMaxSequenceNumber;
|
||||||
sub_compact->c_iter.reset(new CompactionIterator(
|
sub_compact->c_iter.reset(new CompactionIterator(
|
||||||
input, cfd->user_comparator(), &merge, versions_->LastSequence(),
|
input, cfd->user_comparator(), &merge, versions_->LastSequence(),
|
||||||
&existing_snapshots_, earliest_write_conflict_snapshot_,
|
&existing_snapshots_, earliest_write_conflict_snapshot_, job_snapshot_seq,
|
||||||
snapshot_checker_, env_, ShouldReportDetailedTime(env_, stats_),
|
snapshot_checker_, env_, ShouldReportDetailedTime(env_, stats_),
|
||||||
/*expect_valid_internal_key=*/true, &range_del_agg,
|
/*expect_valid_internal_key=*/true, &range_del_agg,
|
||||||
blob_file_builder.get(), db_options_.allow_data_in_errors,
|
blob_file_builder.get(), db_options_.allow_data_in_errors,
|
||||||
sub_compact->compaction, compaction_filter, shutting_down_,
|
db_options_.enforce_single_del_contracts, sub_compact->compaction,
|
||||||
preserve_deletes_seqnum_, manual_compaction_paused_,
|
compaction_filter, shutting_down_, manual_compaction_paused_,
|
||||||
manual_compaction_canceled_, db_options_.info_log, full_history_ts_low));
|
manual_compaction_canceled_, db_options_.info_log, full_history_ts_low));
|
||||||
auto c_iter = sub_compact->c_iter.get();
|
auto c_iter = sub_compact->c_iter.get();
|
||||||
c_iter->SeekToFirst();
|
c_iter->SeekToFirst();
|
||||||
@ -1529,11 +1533,15 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const ParsedInternalKey& ikey = c_iter->ikey();
|
||||||
|
status = sub_compact->current_output()->meta.UpdateBoundaries(
|
||||||
|
key, value, ikey.sequence, ikey.type);
|
||||||
|
if (!status.ok()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
sub_compact->current_output_file_size =
|
sub_compact->current_output_file_size =
|
||||||
sub_compact->builder->EstimatedFileSize();
|
sub_compact->builder->EstimatedFileSize();
|
||||||
const ParsedInternalKey& ikey = c_iter->ikey();
|
|
||||||
sub_compact->current_output()->meta.UpdateBoundaries(
|
|
||||||
key, value, ikey.sequence, ikey.type);
|
|
||||||
sub_compact->num_output_records++;
|
sub_compact->num_output_records++;
|
||||||
|
|
||||||
// Close output file if it is big enough. Two possibilities determine it's
|
// Close output file if it is big enough. Two possibilities determine it's
|
||||||
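The second hunk above changes error handling, not just ordering: UpdateBoundaries() now returns a Status that is checked, and the key-processing loop breaks on failure instead of silently ignoring it. A minimal, self-contained sketch of the pattern (stand-in types, not the real FileMetaData):

#include <cstdint>

struct Status {
  bool ok_;
  bool ok() const { return ok_; }
};

// Stand-in for FileMetaData::UpdateBoundaries, which can now fail.
struct Output {
  uint64_t smallest_seq = UINT64_MAX;
  Status UpdateBoundaries(uint64_t seq) {
    if (seq == 0) return Status{false};  // e.g. an unparsable internal key
    if (seq < smallest_seq) smallest_seq = seq;
    return Status{true};
  }
};

bool ProcessAll(Output& out, const uint64_t* seqs, int n) {
  for (int i = 0; i < n; ++i) {
    Status s = out.UpdateBoundaries(seqs[i]);
    if (!s.ok()) {
      return false;  // propagate instead of dropping the error
    }
  }
  return true;
}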
@ -1966,7 +1974,8 @@ Status CompactionJob::FinishCompactionOutputFile(
|
|||||||
refined_oldest_ancester_time =
|
refined_oldest_ancester_time =
|
||||||
sub_compact->compaction->MinInputFileOldestAncesterTime(
|
sub_compact->compaction->MinInputFileOldestAncesterTime(
|
||||||
&(meta->smallest), &(meta->largest));
|
&(meta->smallest), &(meta->largest));
|
||||||
if (refined_oldest_ancester_time != port::kMaxUint64) {
|
if (refined_oldest_ancester_time !=
|
||||||
|
std::numeric_limits<uint64_t>::max()) {
|
||||||
meta->oldest_ancester_time = refined_oldest_ancester_time;
|
meta->oldest_ancester_time = refined_oldest_ancester_time;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2256,7 +2265,7 @@ Status CompactionJob::OpenCompactionOutputFile(
|
|||||||
sub_compact->compaction->MinInputFileOldestAncesterTime(
|
sub_compact->compaction->MinInputFileOldestAncesterTime(
|
||||||
(sub_compact->start != nullptr) ? &tmp_start : nullptr,
|
(sub_compact->start != nullptr) ? &tmp_start : nullptr,
|
||||||
(sub_compact->end != nullptr) ? &tmp_end : nullptr);
|
(sub_compact->end != nullptr) ? &tmp_end : nullptr);
|
||||||
if (oldest_ancester_time == port::kMaxUint64) {
|
if (oldest_ancester_time == std::numeric_limits<uint64_t>::max()) {
|
||||||
oldest_ancester_time = current_time;
|
oldest_ancester_time = current_time;
|
||||||
}
|
}
|
||||||
|
|
||||||
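These two hunks belong to a repo-wide sweep replacing the port::kMaxUint64 / port::kMaxSizet shims with std::numeric_limits; the compaction_picker hunks further down apply the same substitution. The replacement is value-for-value:

#include <cstdint>
#include <limits>

// Same bit pattern, no home-grown port:: constant required.
static_assert(std::numeric_limits<uint64_t>::max() == UINT64_MAX,
              "numeric_limits matches the old shim");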
@ -2450,7 +2459,7 @@ void CompactionJob::LogCompaction() {
|
|||||||
<< "compaction_reason"
|
<< "compaction_reason"
|
||||||
<< GetCompactionReasonString(compaction->compaction_reason());
|
<< GetCompactionReasonString(compaction->compaction_reason());
|
||||||
for (size_t i = 0; i < compaction->num_input_levels(); ++i) {
|
for (size_t i = 0; i < compaction->num_input_levels(); ++i) {
|
||||||
stream << ("files_L" + ToString(compaction->level(i)));
|
stream << ("files_L" + std::to_string(compaction->level(i)));
|
||||||
stream.StartArray();
|
stream.StartArray();
|
||||||
for (auto f : *compaction->inputs(i)) {
|
for (auto f : *compaction->inputs(i)) {
|
||||||
stream << f->fd.GetNumber();
|
stream << f->fd.GetNumber();
|
||||||
@ -2488,19 +2497,20 @@ CompactionServiceCompactionJob::CompactionServiceCompactionJob(
|
|||||||
std::vector<SequenceNumber> existing_snapshots,
|
std::vector<SequenceNumber> existing_snapshots,
|
||||||
std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
|
std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
|
||||||
const std::string& dbname, const std::shared_ptr<IOTracer>& io_tracer,
|
const std::string& dbname, const std::shared_ptr<IOTracer>& io_tracer,
|
||||||
|
const std::atomic<bool>* manual_compaction_canceled,
|
||||||
const std::string& db_id, const std::string& db_session_id,
|
const std::string& db_id, const std::string& db_session_id,
|
||||||
const std::string& output_path,
|
const std::string& output_path,
|
||||||
const CompactionServiceInput& compaction_service_input,
|
const CompactionServiceInput& compaction_service_input,
|
||||||
CompactionServiceResult* compaction_service_result)
|
CompactionServiceResult* compaction_service_result)
|
||||||
: CompactionJob(
|
: CompactionJob(
|
||||||
job_id, compaction, db_options, mutable_db_options, file_options,
|
job_id, compaction, db_options, mutable_db_options, file_options,
|
||||||
versions, shutting_down, 0, log_buffer, nullptr, output_directory,
|
versions, shutting_down, log_buffer, nullptr, output_directory,
|
||||||
nullptr, stats, db_mutex, db_error_handler, existing_snapshots,
|
nullptr, stats, db_mutex, db_error_handler, existing_snapshots,
|
||||||
kMaxSequenceNumber, nullptr, table_cache, event_logger,
|
kMaxSequenceNumber, nullptr, nullptr, table_cache, event_logger,
|
||||||
compaction->mutable_cf_options()->paranoid_file_checks,
|
compaction->mutable_cf_options()->paranoid_file_checks,
|
||||||
compaction->mutable_cf_options()->report_bg_io_stats, dbname,
|
compaction->mutable_cf_options()->report_bg_io_stats, dbname,
|
||||||
&(compaction_service_result->stats), Env::Priority::USER, io_tracer,
|
&(compaction_service_result->stats), Env::Priority::USER, io_tracer,
|
||||||
nullptr, nullptr, db_id, db_session_id,
|
nullptr, manual_compaction_canceled, db_id, db_session_id,
|
||||||
compaction->column_family_data()->GetFullHistoryTsLow()),
|
compaction->column_family_data()->GetFullHistoryTsLow()),
|
||||||
output_path_(output_path),
|
output_path_(output_path),
|
||||||
compaction_input_(compaction_service_input),
|
compaction_input_(compaction_service_input),
|
||||||
@ -2942,6 +2952,7 @@ static std::unordered_map<std::string, OptionTypeInfo> cs_result_type_info = {
|
|||||||
const void* addr1, const void* addr2, std::string* mismatch) {
|
const void* addr1, const void* addr2, std::string* mismatch) {
|
||||||
const auto status1 = static_cast<const Status*>(addr1);
|
const auto status1 = static_cast<const Status*>(addr1);
|
||||||
const auto status2 = static_cast<const Status*>(addr2);
|
const auto status2 = static_cast<const Status*>(addr2);
|
||||||
|
|
||||||
StatusSerializationAdapter adatper1(*status1);
|
StatusSerializationAdapter adatper1(*status1);
|
||||||
StatusSerializationAdapter adapter2(*status2);
|
StatusSerializationAdapter adapter2(*status2);
|
||||||
return OptionTypeInfo::TypesAreEqual(opts, status_adapter_type_info,
|
return OptionTypeInfo::TypesAreEqual(opts, status_adapter_type_info,
|
||||||
@ -2999,7 +3010,7 @@ Status CompactionServiceInput::Read(const std::string& data_str,
|
|||||||
} else {
|
} else {
|
||||||
return Status::NotSupported(
|
return Status::NotSupported(
|
||||||
"Compaction Service Input data version not supported: " +
|
"Compaction Service Input data version not supported: " +
|
||||||
ToString(format_version));
|
std::to_string(format_version));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3028,7 +3039,7 @@ Status CompactionServiceResult::Read(const std::string& data_str,
|
|||||||
} else {
|
} else {
|
||||||
return Status::NotSupported(
|
return Status::NotSupported(
|
||||||
"Compaction Service Result data version not supported: " +
|
"Compaction Service Result data version not supported: " +
|
||||||
ToString(format_version));
|
std::to_string(format_version));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -67,14 +67,13 @@ class CompactionJob {
       int job_id, Compaction* compaction, const ImmutableDBOptions& db_options,
       const MutableDBOptions& mutable_db_options,
       const FileOptions& file_options, VersionSet* versions,
-      const std::atomic<bool>* shutting_down,
-      const SequenceNumber preserve_deletes_seqnum, LogBuffer* log_buffer,
+      const std::atomic<bool>* shutting_down, LogBuffer* log_buffer,
       FSDirectory* db_directory, FSDirectory* output_directory,
       FSDirectory* blob_output_directory, Statistics* stats,
       InstrumentedMutex* db_mutex, ErrorHandler* db_error_handler,
       std::vector<SequenceNumber> existing_snapshots,
       SequenceNumber earliest_write_conflict_snapshot,
-      const SnapshotChecker* snapshot_checker,
+      const SnapshotChecker* snapshot_checker, JobContext* job_context,
       std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
       bool paranoid_file_checks, bool measure_io_stats,
       const std::string& dbname, CompactionJobStats* compaction_job_stats,

@@ -196,7 +195,6 @@ class CompactionJob {
   const std::atomic<bool>* shutting_down_;
   const std::atomic<int>* manual_compaction_paused_;
   const std::atomic<bool>* manual_compaction_canceled_;
-  const SequenceNumber preserve_deletes_seqnum_;
   FSDirectory* db_directory_;
   FSDirectory* blob_output_directory_;
   InstrumentedMutex* db_mutex_;

@@ -214,6 +212,8 @@ class CompactionJob {
 
   const SnapshotChecker* const snapshot_checker_;
 
+  JobContext* job_context_;
+
   std::shared_ptr<Cache> table_cache_;
 
   EventLogger* event_logger_;

@@ -345,6 +345,7 @@ class CompactionServiceCompactionJob : private CompactionJob {
       std::vector<SequenceNumber> existing_snapshots,
       std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
       const std::string& dbname, const std::shared_ptr<IOTracer>& io_tracer,
+      const std::atomic<bool>* manual_compaction_canceled,
       const std::string& db_id, const std::string& db_session_id,
       const std::string& output_path,
       const CompactionServiceInput& compaction_service_input,
@@ -268,10 +268,10 @@ class CompactionJobStatsTest : public testing::Test,
     if (cf == 0) {
       // default cfd
       EXPECT_TRUE(db_->GetProperty(
-          "rocksdb.num-files-at-level" + ToString(level), &property));
+          "rocksdb.num-files-at-level" + std::to_string(level), &property));
     } else {
       EXPECT_TRUE(db_->GetProperty(
-          handles_[cf], "rocksdb.num-files-at-level" + ToString(level),
+          handles_[cf], "rocksdb.num-files-at-level" + std::to_string(level),
           &property));
     }
     return atoi(property.c_str());

@@ -672,7 +672,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
       snprintf(buf, kBufSize, "%d", ++num_L0_files);
       ASSERT_EQ(std::string(buf), FilesPerLevel(1));
     }
-    ASSERT_EQ(ToString(num_L0_files), FilesPerLevel(1));
+    ASSERT_EQ(std::to_string(num_L0_files), FilesPerLevel(1));
 
     // 2nd Phase: perform L0 -> L1 compaction.
     int L0_compaction_count = 6;

@@ -87,7 +87,6 @@ class CompactionJobTestBase : public testing::Test {
             /*block_cache_tracer=*/nullptr,
             /*io_tracer=*/nullptr, /*db_session_id*/ "")),
         shutting_down_(false),
-        preserve_deletes_seqnum_(0),
         mock_table_factory_(new mock::MockTableFactory()),
         error_handler_(nullptr, db_options_, &mutex_),
         encode_u64_ts_(std::move(encode_u64_ts)) {

@@ -237,8 +236,8 @@ class CompactionJobTestBase : public testing::Test {
     for (int i = 0; i < 2; ++i) {
       auto contents = mock::MakeMockFile();
       for (int k = 0; k < kKeysPerFile; ++k) {
-        auto key = ToString(i * kMatchingKeys + k);
-        auto value = ToString(i * kKeysPerFile + k);
+        auto key = std::to_string(i * kMatchingKeys + k);
+        auto value = std::to_string(i * kKeysPerFile + k);
         InternalKey internal_key(key, ++sequence_number, kTypeValue);
 
         // This is how the key will look like once it's written in bottommost

@@ -354,11 +353,11 @@ class CompactionJobTestBase : public testing::Test {
            ucmp_->timestamp_size() == full_history_ts_low_.size());
     CompactionJob compaction_job(
         0, &compaction, db_options_, mutable_db_options_, env_options_,
-        versions_.get(), &shutting_down_, preserve_deletes_seqnum_, &log_buffer,
-        nullptr, nullptr, nullptr, nullptr, &mutex_, &error_handler_, snapshots,
-        earliest_write_conflict_snapshot, snapshot_checker, table_cache_,
-        &event_logger, false, false, dbname_, &compaction_job_stats_,
-        Env::Priority::USER, nullptr /* IOTracer */,
+        versions_.get(), &shutting_down_, &log_buffer, nullptr, nullptr,
+        nullptr, nullptr, &mutex_, &error_handler_, snapshots,
+        earliest_write_conflict_snapshot, snapshot_checker, nullptr,
+        table_cache_, &event_logger, false, false, dbname_,
+        &compaction_job_stats_, Env::Priority::USER, nullptr /* IOTracer */,
         /*manual_compaction_paused=*/nullptr,
         /*manual_compaction_canceled=*/nullptr, /*db_id=*/"",
         /*db_session_id=*/"", full_history_ts_low_);
@ -409,7 +408,6 @@ class CompactionJobTestBase : public testing::Test {
|
|||||||
std::unique_ptr<VersionSet> versions_;
|
std::unique_ptr<VersionSet> versions_;
|
||||||
InstrumentedMutex mutex_;
|
InstrumentedMutex mutex_;
|
||||||
std::atomic<bool> shutting_down_;
|
std::atomic<bool> shutting_down_;
|
||||||
SequenceNumber preserve_deletes_seqnum_;
|
|
||||||
std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
|
std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
|
||||||
CompactionJobStats compaction_job_stats_;
|
CompactionJobStats compaction_job_stats_;
|
||||||
ColumnFamilyData* cfd_;
|
ColumnFamilyData* cfd_;
|
||||||
@ -892,10 +890,10 @@ TEST_F(CompactionJobTest, MultiSingleDelete) {
|
|||||||
// -> Snapshot Put
|
// -> Snapshot Put
|
||||||
// K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
|
// K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
|
||||||
// -> Snapshot Put Snapshot SDel
|
// -> Snapshot Put Snapshot SDel
|
||||||
// L: SDel Put Del Put SDel Snapshot Del Put Del SDel Put SDel
|
// L: SDel Put SDel Put SDel Snapshot SDel Put SDel SDel Put SDel
|
||||||
// -> Snapshot SDel
|
// -> Snapshot SDel Put SDel
|
||||||
// M: (Put) SDel Put Del Put SDel Snapshot Put Del SDel Put SDel Del
|
// M: (Put) SDel Put SDel Put SDel Snapshot Put SDel SDel Put SDel SDel
|
||||||
// -> SDel Snapshot Del
|
// -> SDel Snapshot Put SDel
|
||||||
NewDB();
|
NewDB();
|
||||||
|
|
||||||
auto file1 = mock::MakeMockFile({
|
auto file1 = mock::MakeMockFile({
|
||||||
@ -926,14 +924,14 @@ TEST_F(CompactionJobTest, MultiSingleDelete) {
|
|||||||
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 15U, kTypeValue), "val"},
|
{KeyStr("L", 15U, kTypeValue), "val"},
|
||||||
{KeyStr("L", 14U, kTypeSingleDeletion), ""},
|
{KeyStr("L", 14U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 13U, kTypeDeletion), ""},
|
{KeyStr("L", 13U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 12U, kTypeValue), "val"},
|
{KeyStr("L", 12U, kTypeValue), "val"},
|
||||||
{KeyStr("L", 11U, kTypeDeletion), ""},
|
{KeyStr("L", 11U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 16U, kTypeDeletion), ""},
|
{KeyStr("M", 16U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 15U, kTypeSingleDeletion), ""},
|
{KeyStr("M", 15U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 14U, kTypeValue), "val"},
|
{KeyStr("M", 14U, kTypeValue), "val"},
|
||||||
{KeyStr("M", 13U, kTypeSingleDeletion), ""},
|
{KeyStr("M", 13U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 12U, kTypeDeletion), ""},
|
{KeyStr("M", 12U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 11U, kTypeValue), "val"},
|
{KeyStr("M", 11U, kTypeValue), "val"},
|
||||||
});
|
});
|
||||||
AddMockFile(file1);
|
AddMockFile(file1);
|
||||||
@ -974,12 +972,12 @@ TEST_F(CompactionJobTest, MultiSingleDelete) {
|
|||||||
{KeyStr("K", 1U, kTypeSingleDeletion), ""},
|
{KeyStr("K", 1U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 5U, kTypeSingleDeletion), ""},
|
{KeyStr("L", 5U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 4U, kTypeValue), "val"},
|
{KeyStr("L", 4U, kTypeValue), "val"},
|
||||||
{KeyStr("L", 3U, kTypeDeletion), ""},
|
{KeyStr("L", 3U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 2U, kTypeValue), "val"},
|
{KeyStr("L", 2U, kTypeValue), "val"},
|
||||||
{KeyStr("L", 1U, kTypeSingleDeletion), ""},
|
{KeyStr("L", 1U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 10U, kTypeSingleDeletion), ""},
|
{KeyStr("M", 10U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 7U, kTypeValue), "val"},
|
{KeyStr("M", 7U, kTypeValue), "val"},
|
||||||
{KeyStr("M", 5U, kTypeDeletion), ""},
|
{KeyStr("M", 5U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("M", 4U, kTypeValue), "val"},
|
{KeyStr("M", 4U, kTypeValue), "val"},
|
||||||
{KeyStr("M", 3U, kTypeSingleDeletion), ""},
|
{KeyStr("M", 3U, kTypeSingleDeletion), ""},
|
||||||
});
|
});
|
||||||
@ -1021,7 +1019,9 @@ TEST_F(CompactionJobTest, MultiSingleDelete) {
|
|||||||
{KeyStr("K", 8U, kTypeValue), "val3"},
|
{KeyStr("K", 8U, kTypeValue), "val3"},
|
||||||
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
||||||
{KeyStr("L", 15U, kTypeValue), ""},
|
{KeyStr("L", 15U, kTypeValue), ""},
|
||||||
{KeyStr("M", 16U, kTypeDeletion), ""},
|
{KeyStr("L", 11U, kTypeSingleDeletion), ""},
|
||||||
|
{KeyStr("M", 15U, kTypeSingleDeletion), ""},
|
||||||
|
{KeyStr("M", 14U, kTypeValue), ""},
|
||||||
{KeyStr("M", 3U, kTypeSingleDeletion), ""}});
|
{KeyStr("M", 3U, kTypeSingleDeletion), ""}});
|
||||||
|
|
||||||
SetLastSequence(22U);
|
SetLastSequence(22U);
|
||||||
@ -1107,6 +1107,21 @@ TEST_F(CompactionJobTest, OldestBlobFileNumber) {
|
|||||||
/* expected_oldest_blob_file_number */ 19);
|
/* expected_oldest_blob_file_number */ 19);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST_F(CompactionJobTest, NoEnforceSingleDeleteContract) {
|
||||||
|
db_options_.enforce_single_del_contracts = false;
|
||||||
|
NewDB();
|
||||||
|
|
||||||
|
auto file =
|
||||||
|
mock::MakeMockFile({{KeyStr("a", 4U, kTypeSingleDeletion), ""},
|
||||||
|
{KeyStr("a", 3U, kTypeDeletion), "dontcare"}});
|
||||||
|
AddMockFile(file);
|
||||||
|
SetLastSequence(4U);
|
||||||
|
|
||||||
|
auto expected_results = mock::MakeMockFile();
|
||||||
|
auto files = cfd_->current()->storage_info()->LevelFiles(0);
|
||||||
|
RunCompaction({files}, expected_results);
|
||||||
|
}
|
||||||
|
|
||||||
TEST_F(CompactionJobTest, InputSerialization) {
|
TEST_F(CompactionJobTest, InputSerialization) {
|
||||||
// Setup a random CompactionServiceInput
|
// Setup a random CompactionServiceInput
|
||||||
CompactionServiceInput input;
|
CompactionServiceInput input;
|
||||||
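The NoEnforceSingleDeleteContract test added above exercises the enforce_single_del_contracts option that the earlier hunks thread through CompactionIterator: with enforcement disabled, a SingleDelete that meets a plain Delete on the same key is compacted away instead of failing the compaction. A hedged usage sketch (the option is the one named in the diff; the path and surrounding setup are illustrative):

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Default is true: a SingleDelete covering an unexpected entry (such as
  // a plain Delete) surfaces as a compaction error. Setting it to false
  // restores the older, permissive behavior, as in the test above.
  options.enforce_single_del_contracts = false;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/single_del_demo", &db);
  assert(s.ok());
  delete db;
  return 0;
}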
@@ -65,7 +65,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
   uint64_t compensated_compact_bytes =
       level_files[start]->compensated_file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
   // Compaction range will be [start, limit).
   size_t limit;
   // Pull in files until the amount of compaction work per deleted file begins

@@ -401,7 +401,7 @@ Status CompactionPicker::GetCompactionInputsFromFileNumbers(
         "Cannot find matched SST files for the following file numbers:");
     for (auto fn : *input_set) {
       message += " ";
-      message += ToString(fn);
+      message += std::to_string(fn);
     }
     return Status::InvalidArgument(message);
   }

@@ -717,7 +717,7 @@ Compaction* CompactionPicker::CompactRange(
     // files that are created during the current compaction.
     if (compact_range_options.bottommost_level_compaction ==
             BottommostLevelCompaction::kForceOptimized &&
-        max_file_num_to_ignore != port::kMaxUint64) {
+        max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
       assert(input_level == output_level);
       // inputs_shrunk holds a continuous subset of input files which were all
       // created before the current manual compaction

@@ -1004,14 +1004,14 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
     return Status::InvalidArgument(
         "Output level for column family " + cf_meta.name +
         " must between [0, " +
-        ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
+        std::to_string(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
   }
 
   if (output_level > MaxOutputLevel()) {
     return Status::InvalidArgument(
         "Exceed the maximum output level defined by "
         "the current compaction algorithm --- " +
-        ToString(MaxOutputLevel()));
+        std::to_string(MaxOutputLevel()));
   }
 
   if (output_level < 0) {

@@ -1061,8 +1061,8 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
       return Status::InvalidArgument(
           "Cannot compact file to up level, input file: " +
           MakeTableFileName("", file_num) + " level " +
-          ToString(input_file_level) + " > output level " +
-          ToString(output_level));
+          std::to_string(input_file_level) + " > output level " +
+          std::to_string(output_level));
     }
   }
 

@@ -504,7 +504,7 @@ bool LevelCompactionBuilder::PickIntraL0Compaction() {
     return false;
   }
   return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64,
+                               std::numeric_limits<uint64_t>::max(),
                                mutable_cf_options_.max_compaction_bytes,
                                &start_level_inputs_, earliest_mem_seqno_);
 }
@@ -273,9 +273,9 @@ TEST_F(CompactionPickerTest, NeedsCompactionLevel) {
     // start a brand new version in each test.
     NewVersionStorage(kLevels, kCompactionStyleLevel);
     for (int i = 0; i < file_count; ++i) {
-      Add(level, i, ToString((i + 100) * 1000).c_str(),
-          ToString((i + 100) * 1000 + 999).c_str(),
-          file_size, 0, i * 100, i * 100 + 99);
+      Add(level, i, std::to_string((i + 100) * 1000).c_str(),
+          std::to_string((i + 100) * 1000 + 999).c_str(), file_size, 0,
+          i * 100, i * 100 + 99);
     }
     UpdateVersionStorageInfo();
     ASSERT_EQ(vstorage_->CompactionScoreLevel(0), level);

@@ -439,8 +439,8 @@ TEST_F(CompactionPickerTest, NeedsCompactionUniversal) {
   for (int i = 1;
        i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) {
     NewVersionStorage(1, kCompactionStyleUniversal);
-    Add(0, i, ToString((i + 100) * 1000).c_str(),
-        ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
+    Add(0, i, std::to_string((i + 100) * 1000).c_str(),
+        std::to_string((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
         i * 100 + 99);
     UpdateVersionStorageInfo();
     ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),

@@ -852,17 +852,17 @@ TEST_F(CompactionPickerTest, UniversalIncrementalSpace4) {
   // L3: (1101, 1180) (1201, 1280) ... (7901, 7908)
   // L4: (1130, 1150) (1160, 1210) (1230, 1250) (1260 1310) ... (7960, 8010)
   for (int i = 11; i < 79; i++) {
-    Add(3, 100 + i * 3, ToString(i * 100).c_str(),
-        ToString(i * 100 + 80).c_str(), kFileSize, 0, 200, 251);
+    Add(3, 100 + i * 3, std::to_string(i * 100).c_str(),
+        std::to_string(i * 100 + 80).c_str(), kFileSize, 0, 200, 251);
     // Add a tie breaker
     if (i == 66) {
       Add(3, 10000U, "6690", "6699", kFileSize, 0, 200, 251);
     }
 
-    Add(4, 100 + i * 3 + 1, ToString(i * 100 + 30).c_str(),
-        ToString(i * 100 + 50).c_str(), kFileSize, 0, 200, 251);
-    Add(4, 100 + i * 3 + 2, ToString(i * 100 + 60).c_str(),
-        ToString(i * 100 + 110).c_str(), kFileSize, 0, 200, 251);
+    Add(4, 100 + i * 3 + 1, std::to_string(i * 100 + 30).c_str(),
+        std::to_string(i * 100 + 50).c_str(), kFileSize, 0, 200, 251);
+    Add(4, 100 + i * 3 + 2, std::to_string(i * 100 + 60).c_str(),
+        std::to_string(i * 100 + 110).c_str(), kFileSize, 0, 200, 251);
   }
   UpdateVersionStorageInfo();
 

@@ -899,14 +899,14 @@ TEST_F(CompactionPickerTest, UniversalIncrementalSpace5) {
   // L3: (1101, 1180) (1201, 1280) ... (7901, 7908)
   // L4: (1130, 1150) (1160, 1210) (1230, 1250) (1260 1310) ... (7960, 8010)
   for (int i = 11; i < 70; i++) {
-    Add(3, 100 + i * 3, ToString(i * 100).c_str(),
-        ToString(i * 100 + 80).c_str(),
+    Add(3, 100 + i * 3, std::to_string(i * 100).c_str(),
+        std::to_string(i * 100 + 80).c_str(),
         i % 10 == 9 ? kFileSize * 100 : kFileSize, 0, 200, 251);
 
-    Add(4, 100 + i * 3 + 1, ToString(i * 100 + 30).c_str(),
-        ToString(i * 100 + 50).c_str(), kFileSize, 0, 200, 251);
-    Add(4, 100 + i * 3 + 2, ToString(i * 100 + 60).c_str(),
-        ToString(i * 100 + 110).c_str(), kFileSize, 0, 200, 251);
+    Add(4, 100 + i * 3 + 1, std::to_string(i * 100 + 30).c_str(),
+        std::to_string(i * 100 + 50).c_str(), kFileSize, 0, 200, 251);
+    Add(4, 100 + i * 3 + 2, std::to_string(i * 100 + 60).c_str(),
+        std::to_string(i * 100 + 110).c_str(), kFileSize, 0, 200, 251);
   }
   UpdateVersionStorageInfo();
 

@@ -941,8 +941,8 @@ TEST_F(CompactionPickerTest, NeedsCompactionFIFO) {
   // size of L0 files.
   for (int i = 1; i <= kFileCount; ++i) {
     NewVersionStorage(1, kCompactionStyleFIFO);
-    Add(0, i, ToString((i + 100) * 1000).c_str(),
-        ToString((i + 100) * 1000 + 999).c_str(), kFileSize, 0, i * 100,
+    Add(0, i, std::to_string((i + 100) * 1000).c_str(),
+        std::to_string((i + 100) * 1000 + 999).c_str(), kFileSize, 0, i * 100,
         i * 100 + 99);
     UpdateVersionStorageInfo();
     ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()),

@@ -2653,8 +2653,8 @@ TEST_F(CompactionPickerTest, UniversalMarkedManualCompaction) {
       universal_compaction_picker.CompactRange(
           cf_name_, mutable_cf_options_, mutable_db_options_, vstorage_.get(),
           ColumnFamilyData::kCompactAllLevels, 6, CompactRangeOptions(),
-          nullptr, nullptr, &manual_end, &manual_conflict, port::kMaxUint64,
-          ""));
+          nullptr, nullptr, &manual_end, &manual_conflict,
+          std::numeric_limits<uint64_t>::max(), ""));
 
   ASSERT_TRUE(compaction);
 

@@ -1371,7 +1371,7 @@ Compaction* UniversalCompactionBuilder::PickPeriodicCompaction() {
 
 uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
   if (!mutable_cf_options_.compaction_options_universal.incremental) {
-    return port::kMaxUint64;
+    return std::numeric_limits<uint64_t>::max();
   } else {
     // Try to align cutting boundary with files at the next level if the
     // file isn't end up with 1/2 of target size, or it would overlap
@@ -12,13 +12,16 @@ namespace ROCKSDB_NAMESPACE {
 
 class MyTestCompactionService : public CompactionService {
  public:
-  MyTestCompactionService(std::string db_path, Options& options,
-                          std::shared_ptr<Statistics>& statistics)
+  MyTestCompactionService(
+      std::string db_path, Options& options,
+      std::shared_ptr<Statistics>& statistics,
+      std::vector<std::shared_ptr<EventListener>>& listeners)
       : db_path_(std::move(db_path)),
         options_(options),
         statistics_(statistics),
         start_info_("na", "na", "na", 0, Env::TOTAL),
-        wait_info_("na", "na", "na", 0, Env::TOTAL) {}
+        wait_info_("na", "na", "na", 0, Env::TOTAL),
+        listeners_(listeners) {}
 
   static const char* kClassName() { return "MyTestCompactionService"; }
 

@@ -71,9 +74,15 @@ class MyTestCompactionService : public CompactionService {
     options_override.table_factory = options_.table_factory;
     options_override.sst_partitioner_factory = options_.sst_partitioner_factory;
     options_override.statistics = statistics_;
+    if (!listeners_.empty()) {
+      options_override.listeners = listeners_;
+    }
+
+    OpenAndCompactOptions options;
+    options.canceled = &canceled_;
+
     Status s = DB::OpenAndCompact(
-        db_path_, db_path_ + "/" + ROCKSDB_NAMESPACE::ToString(info.job_id),
+        options, db_path_, db_path_ + "/" + std::to_string(info.job_id),
         compaction_input, compaction_service_result, options_override);
     if (is_override_wait_result_) {
       *compaction_service_result = override_wait_result_;
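The OpenAndCompact() hunk switches to an overload taking OpenAndCompactOptions, whose canceled field gives the remote worker a cooperative cancellation hook. A sketch of the call shape, mirroring the test code above (the supervisor wiring around it is illustrative):

#include <atomic>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Flag shared with whatever supervises the remote worker; flip it to true
// to abort a running open-and-compact job.
std::atomic<bool> canceled{false};

rocksdb::Status RunRemoteCompaction(const std::string& db_path,
                                    const std::string& job_dir,
                                    const std::string& input,
                                    std::string* result) {
  rocksdb::OpenAndCompactOptions options;
  options.canceled = &canceled;
  rocksdb::CompactionServiceOptionsOverride options_override;
  // ... populate comparator, table_factory, statistics, listeners, etc. ...
  return rocksdb::DB::OpenAndCompact(options, db_path, job_dir, input, result,
                                     options_override);
}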
@@ -112,6 +121,8 @@ class MyTestCompactionService : public CompactionService {
     is_override_wait_status_ = false;
   }
 
+  void SetCanceled(bool canceled) { canceled_ = canceled; }
+
  private:
   InstrumentedMutex mutex_;
   std::atomic_int compaction_num_{0};

@@ -129,6 +140,8 @@ class MyTestCompactionService : public CompactionService {
       CompactionServiceJobStatus::kFailure;
   bool is_override_wait_result_ = false;
   std::string override_wait_result_;
+  std::vector<std::shared_ptr<EventListener>> listeners_;
+  std::atomic_bool canceled_{false};
 };
 
 class CompactionServiceTest : public DBTestBase {

@@ -144,7 +157,7 @@ class CompactionServiceTest : public DBTestBase {
     compactor_statistics_ = CreateDBStatistics();
 
     compaction_service_ = std::make_shared<MyTestCompactionService>(
-        dbname_, *options, compactor_statistics_);
+        dbname_, *options, compactor_statistics_, remote_listeners);
     options->compaction_service = compaction_service_;
     DestroyAndReopen(*options);
   }

@@ -163,7 +176,7 @@ class CompactionServiceTest : public DBTestBase {
     for (int i = 0; i < 20; i++) {
       for (int j = 0; j < 10; j++) {
         int key_id = i * 10 + j;
-        ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+        ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
       }
       ASSERT_OK(Flush());
     }

@@ -173,7 +186,7 @@ class CompactionServiceTest : public DBTestBase {
     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {
         int key_id = i * 20 + j * 2;
-        ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+        ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
       }
       ASSERT_OK(Flush());
     }

@@ -185,13 +198,15 @@ class CompactionServiceTest : public DBTestBase {
     for (int i = 0; i < 200; i++) {
       auto result = Get(Key(i));
       if (i % 2) {
-        ASSERT_EQ(result, "value" + ToString(i));
+        ASSERT_EQ(result, "value" + std::to_string(i));
       } else {
-        ASSERT_EQ(result, "value_new" + ToString(i));
+        ASSERT_EQ(result, "value_new" + std::to_string(i));
       }
     }
   }
 
+  std::vector<std::shared_ptr<EventListener>> remote_listeners;
+
  private:
   std::shared_ptr<Statistics> compactor_statistics_;
   std::shared_ptr<Statistics> primary_statistics_;

@@ -208,7 +223,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) {
   for (int i = 0; i < 20; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }

@@ -216,7 +231,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) {
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }

@@ -226,9 +241,9 @@ TEST_F(CompactionServiceTest, BasicCompactions) {
   for (int i = 0; i < 200; i++) {
     auto result = Get(Key(i));
     if (i % 2) {
-      ASSERT_EQ(result, "value" + ToString(i));
+      ASSERT_EQ(result, "value" + std::to_string(i));
     } else {
-      ASSERT_EQ(result, "value_new" + ToString(i));
+      ASSERT_EQ(result, "value_new" + std::to_string(i));
     }
   }
   auto my_cs = GetCompactionService();

@@ -265,7 +280,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) {
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      s = Put(Key(key_id), "value_new" + ToString(key_id));
+      s = Put(Key(key_id), "value_new" + std::to_string(key_id));
       if (s.IsAborted()) {
         break;
       }

@@ -322,6 +337,51 @@ TEST_F(CompactionServiceTest, ManualCompaction) {
   VerifyTestData();
 }
 
+TEST_F(CompactionServiceTest, CancelCompactionOnRemoteSide) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  ReopenWithCompactionService(&options);
+  GenerateTestData();
+
+  auto my_cs = GetCompactionService();
+
+  std::string start_str = Key(15);
+  std::string end_str = Key(45);
+  Slice start(start_str);
+  Slice end(end_str);
+  uint64_t comp_num = my_cs->GetCompactionNum();
+
+  // Test cancel compaction at the beginning
+  my_cs->SetCanceled(true);
+  auto s = db_->CompactRange(CompactRangeOptions(), &start, &end);
+  ASSERT_TRUE(s.IsIncomplete());
+  // compaction number is not increased
+  ASSERT_GE(my_cs->GetCompactionNum(), comp_num);
+  VerifyTestData();
+
+  // Test cancel compaction in progress
+  ReopenWithCompactionService(&options);
+  GenerateTestData();
+  my_cs = GetCompactionService();
+  my_cs->SetCanceled(false);
+
+  std::atomic_bool cancel_issued{false};
+  SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Inprogress",
+                                        [&](void* /*arg*/) {
+                                          cancel_issued = true;
+                                          my_cs->SetCanceled(true);
+                                        });
+
+  SyncPoint::GetInstance()->EnableProcessing();
+
+  s = db_->CompactRange(CompactRangeOptions(), &start, &end);
+  ASSERT_TRUE(s.IsIncomplete());
+  ASSERT_TRUE(cancel_issued);
+  // compaction number is not increased
+  ASSERT_GE(my_cs->GetCompactionNum(), comp_num);
+  VerifyTestData();
+}
+
 TEST_F(CompactionServiceTest, FailedToStart) {
   Options options = CurrentOptions();
   options.disable_auto_compactions = true;
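The in-progress half of the new CancelCompactionOnRemoteSide test leans on RocksDB's test-only SyncPoint facility: a callback attached to a named point inside CompactionJob::Run() flips the cancel flag exactly while the remote job is executing. The pattern in isolation (the sync-point name is the one the test registers; the include path is the one used inside the RocksDB tree):

#include <atomic>

#include "test_util/sync_point.h"  // test-only utility in the RocksDB tree

std::atomic_bool cancel_issued{false};

void ArmMidCompactionCancel() {
  using ROCKSDB_NAMESPACE::SyncPoint;
  // Fires every time execution passes the named point, which
  // CompactionJob::Run() hits while a compaction is underway.
  SyncPoint::GetInstance()->SetCallBack(
      "CompactionJob::Run():Inprogress",
      [](void* /*arg*/) { cancel_issued = true; });
  SyncPoint::GetInstance()->EnableProcessing();
}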
@ -407,7 +467,7 @@ TEST_F(CompactionServiceTest, CompactionFilter) {
   for (int i = 0; i < 20; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -415,7 +475,7 @@ TEST_F(CompactionServiceTest, CompactionFilter) {
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -429,9 +489,9 @@ TEST_F(CompactionServiceTest, CompactionFilter) {
     if (i > 5 && i <= 105) {
       ASSERT_EQ(result, "NOT_FOUND");
     } else if (i % 2) {
-      ASSERT_EQ(result, "value" + ToString(i));
+      ASSERT_EQ(result, "value" + std::to_string(i));
     } else {
-      ASSERT_EQ(result, "value_new" + ToString(i));
+      ASSERT_EQ(result, "value_new" + std::to_string(i));
     }
   }
   auto my_cs = GetCompactionService();
@ -486,9 +546,9 @@ TEST_F(CompactionServiceTest, ConcurrentCompaction) {
   for (int i = 0; i < 200; i++) {
     auto result = Get(Key(i));
     if (i % 2) {
-      ASSERT_EQ(result, "value" + ToString(i));
+      ASSERT_EQ(result, "value" + std::to_string(i));
     } else {
-      ASSERT_EQ(result, "value_new" + ToString(i));
+      ASSERT_EQ(result, "value_new" + std::to_string(i));
     }
   }
   auto my_cs = GetCompactionService();
@ -503,7 +563,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) {
   for (int i = 0; i < 20; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -511,7 +571,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) {
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -556,7 +616,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) {
   for (int i = 0; i < 20; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -564,7 +624,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) {
   for (int i = 0; i < 4; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -592,7 +652,7 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) {
   for (int i = 0; i < 20; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -600,7 +660,7 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) {
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@ -610,9 +670,9 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) {
   for (int i = 0; i < 200; i++) {
     auto result = Get(Key(i));
     if (i % 2) {
-      ASSERT_EQ(result, "value" + ToString(i));
+      ASSERT_EQ(result, "value" + std::to_string(i));
     } else {
-      ASSERT_EQ(result, "value_new" + ToString(i));
+      ASSERT_EQ(result, "value_new" + std::to_string(i));
     }
   }
 
@ -685,6 +745,88 @@ TEST_F(CompactionServiceTest, FallbackLocalManual) {
   VerifyTestData();
 }
 
+TEST_F(CompactionServiceTest, RemoteEventListener) {
+  class RemoteEventListenerTest : public EventListener {
+   public:
+    const char* Name() const override { return "RemoteEventListenerTest"; }
+
+    void OnSubcompactionBegin(const SubcompactionJobInfo& info) override {
+      auto result = on_going_compactions.emplace(info.job_id);
+      ASSERT_TRUE(result.second);  // make sure there's no duplication
+      compaction_num++;
+      EventListener::OnSubcompactionBegin(info);
+    }
+    void OnSubcompactionCompleted(const SubcompactionJobInfo& info) override {
+      auto num = on_going_compactions.erase(info.job_id);
+      ASSERT_TRUE(num == 1);  // make sure the compaction id exists
+      EventListener::OnSubcompactionCompleted(info);
+    }
+    void OnTableFileCreated(const TableFileCreationInfo& info) override {
+      ASSERT_EQ(on_going_compactions.count(info.job_id), 1);
+      file_created++;
+      EventListener::OnTableFileCreated(info);
+    }
+    void OnTableFileCreationStarted(
+        const TableFileCreationBriefInfo& info) override {
+      ASSERT_EQ(on_going_compactions.count(info.job_id), 1);
+      file_creation_started++;
+      EventListener::OnTableFileCreationStarted(info);
+    }
+
+    bool ShouldBeNotifiedOnFileIO() override {
+      file_io_notified++;
+      return EventListener::ShouldBeNotifiedOnFileIO();
+    }
+
+    std::atomic_uint64_t file_io_notified{0};
+    std::atomic_uint64_t file_creation_started{0};
+    std::atomic_uint64_t file_created{0};
+
+    std::set<int> on_going_compactions;  // store the job_id
+    std::atomic_uint64_t compaction_num{0};
+  };
+
+  auto listener = new RemoteEventListenerTest();
+  remote_listeners.emplace_back(listener);
+
+  Options options = CurrentOptions();
+  ReopenWithCompactionService(&options);
+
+  for (int i = 0; i < 20; i++) {
+    for (int j = 0; j < 10; j++) {
+      int key_id = i * 10 + j;
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
+    }
+    ASSERT_OK(Flush());
+  }
+
+  for (int i = 0; i < 10; i++) {
+    for (int j = 0; j < 10; j++) {
+      int key_id = i * 20 + j * 2;
+      ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id)));
+    }
+    ASSERT_OK(Flush());
+  }
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
+
+  // check the events are triggered
+  ASSERT_TRUE(listener->file_io_notified > 0);
+  ASSERT_TRUE(listener->file_creation_started > 0);
+  ASSERT_TRUE(listener->file_created > 0);
+  ASSERT_TRUE(listener->compaction_num > 0);
+  ASSERT_TRUE(listener->on_going_compactions.empty());
+
+  // verify result
+  for (int i = 0; i < 200; i++) {
+    auto result = Get(Key(i));
+    if (i % 2) {
+      ASSERT_EQ(result, "value" + std::to_string(i));
+    } else {
+      ASSERT_EQ(result, "value_new" + std::to_string(i));
+    }
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
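The listener above is handed to the remote worker through the test fixture's remote_listeners vector. For an ordinary embedded DB the same callbacks are wired through Options::listeners; a minimal sketch using only the public EventListener API (the class name, counter, and path are illustrative):

#include <atomic>
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"

using namespace ROCKSDB_NAMESPACE;

// Counts table files created by flushes and compactions.
class FileCountListener : public EventListener {
 public:
  const char* Name() const override { return "FileCountListener"; }
  void OnTableFileCreated(const TableFileCreationInfo& /*info*/) override {
    files_created_++;
  }
  std::atomic<uint64_t> files_created_{0};
};

int main() {
  Options options;
  options.create_if_missing = true;
  auto listener = std::make_shared<FileCountListener>();
  options.listeners.push_back(listener);  // attach before DB::Open
  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/listener_example", &db);
  if (s.ok()) {
    db->Put(WriteOptions(), "k", "v");
    db->Flush(FlushOptions());  // a flush creates an SST, firing the callback
    delete db;
  }
  return listener->files_created_ > 0 ? 0 : 1;
}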
@ -397,7 +397,7 @@ TEST_P(ComparatorDBTest, DoubleComparator) {
       for (uint32_t j = 0; j < divide_order; j++) {
         to_divide *= 10.0;
       }
-      source_strings.push_back(ToString(r / to_divide));
+      source_strings.push_back(std::to_string(r / to_divide));
     }
 
     DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
@ -40,7 +40,8 @@ Status VerifySstFileChecksum(const Options& options,
 Status VerifySstFileChecksum(const Options& options,
                              const EnvOptions& env_options,
                              const ReadOptions& read_options,
-                             const std::string& file_path) {
+                             const std::string& file_path,
+                             const SequenceNumber& largest_seqno) {
   std::unique_ptr<FSRandomAccessFile> file;
   uint64_t file_size;
   InternalKeyComparator internal_comparator(options.comparator);
@ -61,12 +62,13 @@ Status VerifySstFileChecksum(const Options& options,
       nullptr /* stats */, 0 /* hist_type */, nullptr /* file_read_hist */,
       ioptions.rate_limiter.get()));
   const bool kImmortal = true;
+  auto reader_options = TableReaderOptions(
+      ioptions, options.prefix_extractor, env_options, internal_comparator,
+      false /* skip_filters */, !kImmortal, false /* force_direct_prefetch */,
+      -1 /* level */);
+  reader_options.largest_seqno = largest_seqno;
   s = ioptions.table_factory->NewTableReader(
-      TableReaderOptions(ioptions, options.prefix_extractor, env_options,
-                         internal_comparator, false /* skip_filters */,
-                         !kImmortal, false /* force_direct_prefetch */,
-                         -1 /* level */),
-      std::move(file_reader), file_size, &table_reader,
+      reader_options, std::move(file_reader), file_size, &table_reader,
       false /* prefetch_index_and_filter_in_cache */);
   if (!s.ok()) {
     return s;
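The signature change threads the file's largest sequence number into TableReaderOptions, so the standalone verifier builds its table reader the way the DB itself would. A hedged call-site sketch; the header location and the use of kMaxSequenceNumber as a "no bound known" value are my assumptions, and the file path is illustrative:

#include <cstdio>
#include "rocksdb/convenience.h"  // assumed home of VerifySstFileChecksum

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  EnvOptions env_options;
  ReadOptions read_options;
  // With no MANIFEST entry at hand, pass kMaxSequenceNumber as the bound.
  Status s = VerifySstFileChecksum(options, env_options, read_options,
                                   "/tmp/example.sst", kMaxSequenceNumber);
  if (!s.ok()) {
    std::fprintf(stderr, "verification failed: %s\n", s.ToString().c_str());
    return 1;
  }
  return 0;
}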
@ -26,6 +26,7 @@
 #include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/table.h"
+#include "rocksdb/utilities/transaction_db.h"
 #include "rocksdb/write_batch.h"
 #include "table/block_based/block_based_table_builder.h"
 #include "table/meta_blocks.h"
@ -275,6 +276,42 @@ class CorruptionTest : public testing::Test {
     }
     return Slice(*storage);
   }
+
+  void GetSortedWalFiles(std::vector<uint64_t>& file_nums) {
+    std::vector<std::string> tmp_files;
+    ASSERT_OK(env_->GetChildren(dbname_, &tmp_files));
+    FileType type = kWalFile;
+    for (const auto& file : tmp_files) {
+      uint64_t number = 0;
+      if (ParseFileName(file, &number, &type) && type == kWalFile) {
+        file_nums.push_back(number);
+      }
+    }
+    std::sort(file_nums.begin(), file_nums.end());
+  }
+
+  void CorruptFileWithTruncation(FileType file, uint64_t number,
+                                 uint64_t bytes_to_truncate = 0) {
+    std::string path;
+    switch (file) {
+      case FileType::kWalFile:
+        path = LogFileName(dbname_, number);
+        break;
+      // TODO: Add other file types as this method is being used for those file
+      // types.
+      default:
+        return;
+    }
+    uint64_t old_size = 0;
+    ASSERT_OK(env_->GetFileSize(path, &old_size));
+    assert(old_size > bytes_to_truncate);
+    uint64_t new_size = old_size - bytes_to_truncate;
+    // If bytes_to_truncate == 0, it will do full truncation.
+    if (bytes_to_truncate == 0) {
+      new_size = 0;
+    }
+    ASSERT_OK(test::TruncateFile(env_, path, new_size));
+  }
 };
 
 TEST_F(CorruptionTest, Recovery) {
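These two helpers are used in tandem by every new test below: enumerate the WAL file numbers, pick a victim, then truncate its tail to fake a torn write. A sketch of the pattern inside the fixture; the test name is hypothetical, and Build() is assumed to be existing CorruptionTest fixture API for generating data:

// Sketch (inside the CorruptionTest fixture, so env_, dbname_ and the new
// helpers are in scope): fake a torn write on the newest WAL.
TEST_F(CorruptionTest, TruncatedWalSketch) {  // hypothetical test name
  Build(10);  // write some entries so a WAL exists (assumed fixture API)
  CloseDb();

  std::vector<uint64_t> wal_numbers;
  GetSortedWalFiles(wal_numbers);  // ascending WAL file numbers
  ASSERT_FALSE(wal_numbers.empty());

  // Chop 8 bytes off the end to emulate a partially written record;
  // passing 0 instead would truncate the file to empty.
  CorruptFileWithTruncation(FileType::kWalFile, wal_numbers.back(),
                            /*bytes_to_truncate=*/8);
}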
@ -300,6 +337,72 @@ TEST_F(CorruptionTest, Recovery) {
   Check(36, 36);
 }
 
+TEST_F(CorruptionTest, PostPITRCorruptionWALsRetained) {
+  // Repro for bug where WALs following the point-in-time recovery were not
+  // retained, leading to the next recovery failing.
+  CloseDb();
+
+  options_.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+
+  const std::string test_cf_name = "test_cf";
+  std::vector<ColumnFamilyDescriptor> cf_descs;
+  cf_descs.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions());
+  cf_descs.emplace_back(test_cf_name, ColumnFamilyOptions());
+
+  uint64_t log_num;
+  {
+    options_.create_missing_column_families = true;
+    std::vector<ColumnFamilyHandle*> cfhs;
+    ASSERT_OK(DB::Open(options_, dbname_, cf_descs, &cfhs, &db_));
+    assert(db_ != nullptr);  // suppress false clang-analyze report
+
+    ASSERT_OK(db_->Put(WriteOptions(), cfhs[0], "k", "v"));
+    ASSERT_OK(db_->Put(WriteOptions(), cfhs[1], "k", "v"));
+    ASSERT_OK(db_->Put(WriteOptions(), cfhs[0], "k2", "v2"));
+    std::vector<uint64_t> file_nums;
+    GetSortedWalFiles(file_nums);
+    log_num = file_nums.back();
+    for (auto* cfh : cfhs) {
+      delete cfh;
+    }
+    CloseDb();
+  }
+
+  CorruptFileWithTruncation(FileType::kWalFile, log_num,
+                            /*bytes_to_truncate=*/1);
+
+  {
+    // Recover "k" -> "v" for both CFs. "k2" -> "v2" is lost due to truncation.
+    options_.avoid_flush_during_recovery = true;
+    std::vector<ColumnFamilyHandle*> cfhs;
+    ASSERT_OK(DB::Open(options_, dbname_, cf_descs, &cfhs, &db_));
+    assert(db_ != nullptr);  // suppress false clang-analyze report
+
+    // Flush one but not both CFs and write some data so there's a seqno gap
+    // between the PITR corruption and the next DB session's first WAL.
+    ASSERT_OK(db_->Put(WriteOptions(), cfhs[1], "k2", "v2"));
+    ASSERT_OK(db_->Flush(FlushOptions(), cfhs[1]));
+
+    for (auto* cfh : cfhs) {
+      delete cfh;
+    }
+    CloseDb();
+  }
+
+  // With the bug, this DB open would remove the WALs following the PITR
+  // corruption. Then, the next recovery would fail.
+  for (int i = 0; i < 2; ++i) {
+    std::vector<ColumnFamilyHandle*> cfhs;
+    ASSERT_OK(DB::Open(options_, dbname_, cf_descs, &cfhs, &db_));
+    assert(db_ != nullptr);  // suppress false clang-analyze report
+
+    for (auto* cfh : cfhs) {
+      delete cfh;
+    }
+    CloseDb();
+  }
+}
+
 TEST_F(CorruptionTest, RecoverWriteError) {
   env_->writable_file_error_ = true;
   Status s = TryReopen();
@ -912,6 +1015,480 @@ TEST_F(CorruptionTest, VerifyWholeTableChecksum) {
   ASSERT_EQ(1, count);
 }
 
+class CrashDuringRecoveryWithCorruptionTest
+    : public CorruptionTest,
+      public testing::WithParamInterface<std::tuple<bool, bool>> {
+ public:
+  explicit CrashDuringRecoveryWithCorruptionTest()
+      : CorruptionTest(),
+        avoid_flush_during_recovery_(std::get<0>(GetParam())),
+        track_and_verify_wals_in_manifest_(std::get<1>(GetParam())) {}
+
+ protected:
+  const bool avoid_flush_during_recovery_;
+  const bool track_and_verify_wals_in_manifest_;
+};
+
+INSTANTIATE_TEST_CASE_P(CorruptionTest, CrashDuringRecoveryWithCorruptionTest,
+                        ::testing::Values(std::make_tuple(true, false),
+                                          std::make_tuple(false, false),
+                                          std::make_tuple(true, true),
+                                          std::make_tuple(false, true)));
+
+// In case of non-TransactionDB with avoid_flush_during_recovery = true, RocksDB
+// won't flush the data from WAL to L0 for all column families if possible. As a
+// result, not all column families can increase their log_numbers, and
+// min_log_number_to_keep won't change.
+// It may prematurely persist a new MANIFEST even before we can declare the DB
+// is in a consistent state after recovery (this is when the new WAL is synced)
+// and advance log_numbers for some column families.
+//
+// If there is a power failure before we sync the new WAL, we will end up in
+// a situation in which, after persisting the MANIFEST, RocksDB will see some
+// column families' log_numbers larger than the corrupted WAL, and the
+// "Column family inconsistency: SST file contains data beyond the point of
+// corruption" error will be hit, causing recovery to fail.
+//
+// After adding the fix, RocksDB persists a new MANIFEST with column families
+// only after the new WAL is synced, to ensure RocksDB is in a consistent
+// state. RocksDB writes an empty WriteBatch as a sentinel to the new WAL,
+// which is synced immediately afterwards. The sequence number of the sentinel
+// WriteBatch will be the next sequence number immediately after the largest
+// sequence number recovered from previous WALs and the MANIFEST, because of
+// which the DB will be in a consistent state.
+// If a future recovery starts from the new MANIFEST, then it means the new
+// WAL was successfully synced. Due to the sentinel empty write batch at the
+// beginning, kPointInTimeRecovery of the WAL is guaranteed to go after this
+// point. If a future recovery starts from the old MANIFEST, it means writing
+// the new MANIFEST failed. It won't have the "SST ahead of WAL" error.
+//
+// The combination of corrupting a WAL and injecting an error during subsequent
+// re-open exposes the bug of prematurely persisting a new MANIFEST with
+// advanced ColumnFamilyData::log_number.
+TEST_P(CrashDuringRecoveryWithCorruptionTest, DISABLED_CrashDuringRecovery) {
+  CloseDb();
+  Options options;
+  options.track_and_verify_wals_in_manifest =
+      track_and_verify_wals_in_manifest_;
+  options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+  options.avoid_flush_during_recovery = false;
+  options.env = env_;
+  ASSERT_OK(DestroyDB(dbname_, options));
+  options.create_if_missing = true;
+  options.max_write_buffer_number = 8;
+
+  Reopen(&options);
+  Status s;
+  const std::string test_cf_name = "test_cf";
+  ColumnFamilyHandle* cfh = nullptr;
+  s = db_->CreateColumnFamily(options, test_cf_name, &cfh);
+  ASSERT_OK(s);
+  delete cfh;
+  CloseDb();
+
+  std::vector<ColumnFamilyDescriptor> cf_descs;
+  cf_descs.emplace_back(kDefaultColumnFamilyName, options);
+  cf_descs.emplace_back(test_cf_name, options);
+  std::vector<ColumnFamilyHandle*> handles;
+
+  // 1. Open and populate the DB. Write and flush default_cf several times to
+  // advance the wal number so that some column families have an advanced
+  // log_number while others don't.
+  {
+    ASSERT_OK(DB::Open(options, dbname_, cf_descs, &handles, &db_));
+    auto* dbimpl = static_cast_with_check<DBImpl>(db_);
+    assert(dbimpl);
+
+    // Write one key to test_cf.
+    ASSERT_OK(db_->Put(WriteOptions(), handles[1], "old_key", "dontcare"));
+    ASSERT_OK(db_->Flush(FlushOptions(), handles[1]));
+
+    // Write to default_cf and flush this cf several times to advance the wal
+    // number. TEST_SwitchMemtable makes sure WALs are not synced, so the test
+    // can corrupt an un-synced WAL.
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_OK(db_->Put(WriteOptions(), "key" + std::to_string(i), "value"));
+      ASSERT_OK(dbimpl->TEST_SwitchMemtable());
+    }
+
+    for (auto* h : handles) {
+      delete h;
+    }
+    handles.clear();
+    CloseDb();
+  }
+
+  // 2. Corrupt the second last un-synced wal file to emulate a power reset
+  // which caused the DB to lose the un-synced WAL.
+  {
+    std::vector<uint64_t> file_nums;
+    GetSortedWalFiles(file_nums);
+    size_t size = file_nums.size();
+    assert(size >= 2);
+    uint64_t log_num = file_nums[size - 2];
+    CorruptFileWithTruncation(FileType::kWalFile, log_num,
+                              /*bytes_to_truncate=*/8);
+  }
+
+  // 3. After the first crash, reopen the DB which contains the corrupted WAL.
+  // The default family has a higher log number than the corrupted wal number.
+  //
+  // Case1: If avoid_flush_during_recovery = true, RocksDB won't flush the data
+  // from WAL to L0 for all column families (test_cf_name in this case). As a
+  // result, not all column families can increase their log_numbers, and
+  // min_log_number_to_keep won't change.
+  //
+  // Case2: If avoid_flush_during_recovery = false, all column families have
+  // flushed their data from WAL to L0 during recovery, and none of them will
+  // ever need to read the WALs again.
+
+  // 4. Fault is injected to fail the recovery.
+  {
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+    SyncPoint::GetInstance()->SetCallBack(
+        "DBImpl::GetLogSizeAndMaybeTruncate:0", [&](void* arg) {
+          auto* tmp_s = reinterpret_cast<Status*>(arg);
+          assert(tmp_s);
+          *tmp_s = Status::IOError("Injected");
+        });
+    SyncPoint::GetInstance()->EnableProcessing();
+
+    handles.clear();
+    options.avoid_flush_during_recovery = true;
+    s = DB::Open(options, dbname_, cf_descs, &handles, &db_);
+    ASSERT_TRUE(s.IsIOError());
+    ASSERT_EQ("IO error: Injected", s.ToString());
+    for (auto* h : handles) {
+      delete h;
+    }
+    CloseDb();
+
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+  }
+
+  // 5. After the second crash, reopen the db with the second corruption. The
+  // default family has a higher log number than the corrupted wal number.
+  //
+  // Case1: If avoid_flush_during_recovery = true, we persist a new
+  // MANIFEST with advanced log_numbers for some column families only after
+  // syncing the WAL. So during the second crash, RocksDB will skip the
+  // corrupted WAL files as they have been moved to a different folder. Since
+  // the newly synced WAL file's sequence number (sentinel WriteBatch) will be
+  // the next sequence number immediately after the largest sequence number
+  // recovered from previous WALs and the MANIFEST, the db will be in a
+  // consistent state and opens successfully.
+  //
+  // Case2: If avoid_flush_during_recovery = false, the corrupted WAL is below
+  // this number. So during a second crash after persisting the new MANIFEST,
+  // RocksDB will skip the corrupted WAL(s) because they are all below this
+  // bound. Therefore, we won't hit the "column family inconsistency" error
+  // message.
+  {
+    options.avoid_flush_during_recovery = avoid_flush_during_recovery_;
+    ASSERT_OK(DB::Open(options, dbname_, cf_descs, &handles, &db_));
+    for (auto* h : handles) {
+      delete h;
+    }
+    handles.clear();
+    CloseDb();
+  }
+}
+
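In short, the fix the comments describe is an ordering constraint during open: replay, then sync a sentinel into the new WAL, and only then persist the MANIFEST. An illustrative sketch only; all three helpers are hypothetical stand-ins, not RocksDB's actual recovery code:

#include "rocksdb/status.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical stand-ins for the real recovery steps.
static Status ReplayWALsUpToCorruption() { return Status::OK(); }
static Status WriteSentinelBatchAndSyncNewWAL() { return Status::OK(); }
static Status PersistManifestWithAdvancedLogNumbers() { return Status::OK(); }

Status RecoverThenPersistState() {
  // 1. Replay old WALs up to the corruption point (kPointInTimeRecovery).
  Status s = ReplayWALsUpToCorruption();
  if (!s.ok()) return s;
  // 2. Append an empty sentinel WriteBatch to the new WAL and sync it; its
  //    sequence number directly follows the largest recovered seqno.
  s = WriteSentinelBatchAndSyncNewWAL();
  if (!s.ok()) return s;
  // 3. Only now persist a MANIFEST with advanced log_numbers. A crash before
  //    this point restarts recovery from the old MANIFEST, avoiding the
  //    "SST file contains data beyond the point of corruption" failure.
  return PersistManifestWithAdvancedLogNumbers();
}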
+// In case of TransactionDB, it enables two-phase-commit. The prepare section
+// of an uncommitted transaction always needs to be kept. Even if we perform a
+// flush during recovery, we may still need to hold an old WAL. The
+// min_log_number_to_keep won't change, and the "Column family inconsistency:
+// SST file contains data beyond the point of corruption" error will be hit,
+// causing recovery to fail.
+//
+// After adding the fix, RocksDB persists a new MANIFEST with column families
+// only after the new WAL is synced, to ensure RocksDB is in a consistent
+// state. RocksDB writes an empty WriteBatch as a sentinel to the new WAL,
+// which is synced immediately afterwards. The sequence number of the sentinel
+// WriteBatch will be the next sequence number immediately after the largest
+// sequence number recovered from previous WALs and the MANIFEST, because of
+// which the DB will be in a consistent state.
+// If a future recovery starts from the new MANIFEST, then it means the new
+// WAL was successfully synced. Due to the sentinel empty write batch at the
+// beginning, kPointInTimeRecovery of the WAL is guaranteed to go after this
+// point. If a future recovery starts from the old MANIFEST, it means writing
+// the new MANIFEST failed. It won't have the "SST ahead of WAL" error.
+//
+// The combination of corrupting a WAL and injecting an error during subsequent
+// re-open exposes the bug of prematurely persisting a new MANIFEST with
+// advanced ColumnFamilyData::log_number.
+TEST_P(CrashDuringRecoveryWithCorruptionTest,
+       DISABLED_TxnDbCrashDuringRecovery) {
+  CloseDb();
+  Options options;
+  options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+  options.track_and_verify_wals_in_manifest =
+      track_and_verify_wals_in_manifest_;
+  options.avoid_flush_during_recovery = false;
+  options.env = env_;
+  ASSERT_OK(DestroyDB(dbname_, options));
+  options.create_if_missing = true;
+  options.max_write_buffer_number = 3;
+  Reopen(&options);
+
+  // Create cf test_cf_name.
+  ColumnFamilyHandle* cfh = nullptr;
+  const std::string test_cf_name = "test_cf";
+  Status s = db_->CreateColumnFamily(options, test_cf_name, &cfh);
+  ASSERT_OK(s);
+  delete cfh;
+  CloseDb();
+
+  std::vector<ColumnFamilyDescriptor> cf_descs;
+  cf_descs.emplace_back(kDefaultColumnFamilyName, options);
+  cf_descs.emplace_back(test_cf_name, options);
+  std::vector<ColumnFamilyHandle*> handles;
+
+  TransactionDB* txn_db = nullptr;
+  TransactionDBOptions txn_db_opts;
+
+  // 1. Open and populate the DB. Write and flush default_cf several times to
+  // advance the wal number so that some column families have an advanced
+  // log_number while others don't.
+  {
+    ASSERT_OK(TransactionDB::Open(options, txn_db_opts, dbname_, cf_descs,
+                                  &handles, &txn_db));
+
+    auto* txn = txn_db->BeginTransaction(WriteOptions(), TransactionOptions());
+    // Put cf1
+    ASSERT_OK(txn->Put(handles[1], "foo", "value"));
+    ASSERT_OK(txn->SetName("txn0"));
+    ASSERT_OK(txn->Prepare());
+    ASSERT_OK(txn_db->Flush(FlushOptions()));
+
+    delete txn;
+    txn = nullptr;
+
+    auto* dbimpl = static_cast_with_check<DBImpl>(txn_db->GetRootDB());
+    assert(dbimpl);
+
+    // Put and flush cf0
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_OK(txn_db->Put(WriteOptions(), "dontcare", "value"));
+      ASSERT_OK(dbimpl->TEST_SwitchMemtable());
+    }
+
+    // Put cf1
+    txn = txn_db->BeginTransaction(WriteOptions(), TransactionOptions());
+    ASSERT_OK(txn->Put(handles[1], "foo1", "value"));
+    ASSERT_OK(txn->Commit());
+
+    delete txn;
+    txn = nullptr;
+
+    for (auto* h : handles) {
+      delete h;
+    }
+    handles.clear();
+    delete txn_db;
+  }
+
+  // 2. Corrupt the second last wal to emulate a power reset which caused the
+  // DB to lose the un-synced WAL.
+  {
+    std::vector<uint64_t> file_nums;
+    GetSortedWalFiles(file_nums);
+    size_t size = file_nums.size();
+    assert(size >= 2);
+    uint64_t log_num = file_nums[size - 2];
+    CorruptFileWithTruncation(FileType::kWalFile, log_num,
+                              /*bytes_to_truncate=*/8);
+  }
+
+  // 3. After the first crash, reopen the DB which contains the corrupted WAL.
+  // The default family has a higher log number than the corrupted wal number.
+  // There may be old WAL files that it must not delete because they can
+  // contain data of uncommitted transactions. As a result,
+  // min_log_number_to_keep won't change.
+
+  {
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+    SyncPoint::GetInstance()->SetCallBack(
+        "DBImpl::Open::BeforeSyncWAL", [&](void* arg) {
+          auto* tmp_s = reinterpret_cast<Status*>(arg);
+          assert(tmp_s);
+          *tmp_s = Status::IOError("Injected");
+        });
+    SyncPoint::GetInstance()->EnableProcessing();
+
+    handles.clear();
+    s = TransactionDB::Open(options, txn_db_opts, dbname_, cf_descs, &handles,
+                            &txn_db);
+    ASSERT_TRUE(s.IsIOError());
+    ASSERT_EQ("IO error: Injected", s.ToString());
+    for (auto* h : handles) {
+      delete h;
+    }
+    CloseDb();
+
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+  }
+
+  // 4. Corrupt max_wal_num.
+  {
+    std::vector<uint64_t> file_nums;
+    GetSortedWalFiles(file_nums);
+    size_t size = file_nums.size();
+    assert(size >= 2);
+    uint64_t log_num = file_nums[size - 1];
+    CorruptFileWithTruncation(FileType::kWalFile, log_num);
+  }
+
+  // 5. After the second crash, reopen the db with the second corruption. The
+  // default family has a higher log number than the corrupted wal number.
+  // We persist a new MANIFEST with advanced log_numbers for some column
+  // families only after syncing the WAL. So during the second crash, RocksDB
+  // will skip the corrupted WAL files as they have been moved to a different
+  // folder. Since the newly synced WAL file's sequence number (sentinel
+  // WriteBatch) will be the next sequence number immediately after the
+  // largest sequence number recovered from previous WALs and the MANIFEST,
+  // the db will be in a consistent state and opens successfully.
+  {
+    ASSERT_OK(TransactionDB::Open(options, txn_db_opts, dbname_, cf_descs,
+                                  &handles, &txn_db));
+    for (auto* h : handles) {
+      delete h;
+    }
+    delete txn_db;
+  }
+}
+
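The transaction setup above is the standard two-phase-commit sequence; the prepare section written by Prepare() is what forces old WALs to be retained. Condensed to its API skeleton (the path is illustrative, error handling trimmed to asserts):

#include <cassert>
#include "rocksdb/utilities/transaction_db.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  options.create_if_missing = true;
  TransactionDBOptions txn_db_opts;
  TransactionDB* txn_db = nullptr;
  Status s = TransactionDB::Open(options, txn_db_opts, "/tmp/txn_example",
                                 &txn_db);
  assert(s.ok());

  Transaction* txn =
      txn_db->BeginTransaction(WriteOptions(), TransactionOptions());
  s = txn->SetName("txn0");  // a name is required before Prepare()
  assert(s.ok());
  s = txn->Put("foo", "value");
  assert(s.ok());
  s = txn->Prepare();  // writes the prepare section to the WAL
  assert(s.ok());
  // Until Commit() (or Rollback()), that prepare section pins its WAL.
  s = txn->Commit();
  assert(s.ok());

  delete txn;
  delete txn_db;
  return 0;
}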
+// This test is similar to
+// CrashDuringRecoveryWithCorruptionTest.CrashDuringRecovery except that it
+// calls flush and corrupts the last WAL. It calls flush to sync some of the
+// WALs; the remaining ones are un-synced, one of which is then corrupted to
+// simulate a crash.
+//
+// In case of non-TransactionDB with avoid_flush_during_recovery = true, RocksDB
+// won't flush the data from WAL to L0 for all column families if possible. As a
+// result, not all column families can increase their log_numbers, and
+// min_log_number_to_keep won't change.
+// It may prematurely persist a new MANIFEST even before we can declare the DB
+// is in a consistent state after recovery (this is when the new WAL is synced)
+// and advance log_numbers for some column families.
+//
+// If there is a power failure before we sync the new WAL, we will end up in
+// a situation in which, after persisting the MANIFEST, RocksDB will see some
+// column families' log_numbers larger than the corrupted WAL, and the
+// "Column family inconsistency: SST file contains data beyond the point of
+// corruption" error will be hit, causing recovery to fail.
+//
+// After adding the fix, RocksDB persists a new MANIFEST with column families
+// only after the new WAL is synced, to ensure RocksDB is in a consistent
+// state. RocksDB writes an empty WriteBatch as a sentinel to the new WAL,
+// which is synced immediately afterwards. The sequence number of the sentinel
+// WriteBatch will be the next sequence number immediately after the largest
+// sequence number recovered from previous WALs and the MANIFEST, because of
+// which the DB will be in a consistent state.
+// If a future recovery starts from the new MANIFEST, then it means the new
+// WAL was successfully synced. Due to the sentinel empty write batch at the
+// beginning, kPointInTimeRecovery of the WAL is guaranteed to go after this
+// point. If a future recovery starts from the old MANIFEST, it means writing
+// the new MANIFEST failed. It won't have the "SST ahead of WAL" error.
+
+// The combination of corrupting a WAL and injecting an error during subsequent
+// re-open exposes the bug of prematurely persisting a new MANIFEST with
+// advanced ColumnFamilyData::log_number.
+TEST_P(CrashDuringRecoveryWithCorruptionTest,
+       DISABLED_CrashDuringRecoveryWithFlush) {
+  CloseDb();
+  Options options;
+  options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+  options.avoid_flush_during_recovery = false;
+  options.env = env_;
+  options.create_if_missing = true;
+
+  ASSERT_OK(DestroyDB(dbname_, options));
+  Reopen(&options);
+
+  ColumnFamilyHandle* cfh = nullptr;
+  const std::string test_cf_name = "test_cf";
+  Status s = db_->CreateColumnFamily(options, test_cf_name, &cfh);
+  ASSERT_OK(s);
+  delete cfh;
+
+  CloseDb();
+
+  std::vector<ColumnFamilyDescriptor> cf_descs;
+  cf_descs.emplace_back(kDefaultColumnFamilyName, options);
+  cf_descs.emplace_back(test_cf_name, options);
+  std::vector<ColumnFamilyHandle*> handles;
+
+  {
+    ASSERT_OK(DB::Open(options, dbname_, cf_descs, &handles, &db_));
+
+    // Write one key to test_cf.
+    ASSERT_OK(db_->Put(WriteOptions(), handles[1], "old_key", "dontcare"));
+
+    // Write to default_cf and flush this cf several times to advance the wal
+    // number.
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_OK(db_->Put(WriteOptions(), "key" + std::to_string(i), "value"));
+      ASSERT_OK(db_->Flush(FlushOptions()));
+    }
+
+    ASSERT_OK(db_->Put(WriteOptions(), handles[1], "dontcare", "dontcare"));
+    for (auto* h : handles) {
+      delete h;
+    }
+    handles.clear();
+    CloseDb();
+  }
+
+  // Corrupt the second last un-synced wal file to emulate a power reset which
+  // caused the DB to lose the un-synced WAL.
+  {
+    std::vector<uint64_t> file_nums;
+    GetSortedWalFiles(file_nums);
+    size_t size = file_nums.size();
+    uint64_t log_num = file_nums[size - 1];
+    CorruptFileWithTruncation(FileType::kWalFile, log_num,
+                              /*bytes_to_truncate=*/8);
+  }
+
+  // Fault is injected to fail the recovery.
+  {
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+    SyncPoint::GetInstance()->SetCallBack(
+        "DBImpl::GetLogSizeAndMaybeTruncate:0", [&](void* arg) {
+          auto* tmp_s = reinterpret_cast<Status*>(arg);
+          assert(tmp_s);
+          *tmp_s = Status::IOError("Injected");
+        });
+    SyncPoint::GetInstance()->EnableProcessing();
+
+    handles.clear();
+    options.avoid_flush_during_recovery = true;
+    s = DB::Open(options, dbname_, cf_descs, &handles, &db_);
+    ASSERT_TRUE(s.IsIOError());
+    ASSERT_EQ("IO error: Injected", s.ToString());
+    for (auto* h : handles) {
+      delete h;
+    }
+    CloseDb();
+
+    SyncPoint::GetInstance()->DisableProcessing();
+    SyncPoint::GetInstance()->ClearAllCallBacks();
+  }
+
+  // Reopen db again
+  {
+    options.avoid_flush_during_recovery = avoid_flush_during_recovery_;
+    ASSERT_OK(DB::Open(options, dbname_, cf_descs, &handles, &db_));
+    for (auto* h : handles) {
+      delete h;
+    }
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
@ -95,8 +95,8 @@ class CuckooTableDBTest : public testing::Test {
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
-                                 &property));
+    EXPECT_TRUE(db_->GetProperty(
+        "rocksdb.num-files-at-level" + std::to_string(level), &property));
     return atoi(property.c_str());
   }
 
@ -175,7 +175,10 @@ TEST_F(DBBasicTest, ReadOnlyDB) {
   ASSERT_TRUE(db_->SyncWAL().IsNotSupported());
 }
 
-TEST_F(DBBasicTest, ReadOnlyDBWithWriteDBIdToManifestSet) {
+// TODO akanksha: Update the test to check that combination
+// does not actually write to FS (use open read-only with
+// CompositeEnvWrapper+ReadOnlyFileSystem).
+TEST_F(DBBasicTest, DISABLED_ReadOnlyDBWithWriteDBIdToManifestSet) {
   ASSERT_OK(Put("foo", "v1"));
   ASSERT_OK(Put("bar", "v2"));
   ASSERT_OK(Put("foo", "v3"));
@ -3780,7 +3783,7 @@ TEST_P(DBBasicTestDeadline, PointLookupDeadline) {
 
     Random rnd(301);
     for (int i = 0; i < 400; ++i) {
-      std::string key = "k" + ToString(i);
+      std::string key = "k" + std::to_string(i);
       ASSERT_OK(Put(key, rnd.RandomString(100)));
     }
     ASSERT_OK(Flush());
@ -3863,7 +3866,7 @@ TEST_P(DBBasicTestDeadline, IteratorDeadline) {
 
     Random rnd(301);
    for (int i = 0; i < 400; ++i) {
-      std::string key = "k" + ToString(i);
+      std::string key = "k" + std::to_string(i);
       ASSERT_OK(Put(key, rnd.RandomString(100)));
     }
     ASSERT_OK(Flush());
@ -13,6 +13,7 @@
 
 #include "cache/cache_entry_roles.h"
 #include "cache/cache_key.h"
+#include "cache/fast_lru_cache.h"
 #include "cache/lru_cache.h"
 #include "db/column_family.h"
 #include "db/db_impl/db_impl.h"
@ -75,7 +76,7 @@ class DBBlockCacheTest : public DBTestBase {
   void InitTable(const Options& /*options*/) {
     std::string value(kValueSize, 'a');
     for (size_t i = 0; i < kNumBlocks; i++) {
-      ASSERT_OK(Put(ToString(i), value.c_str()));
+      ASSERT_OK(Put(std::to_string(i), value.c_str()));
     }
   }
 
@ -204,7 +205,7 @@ TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) {
 
   ASSERT_EQ(0, cache->GetUsage());
   iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(0));
+  iter->Seek(std::to_string(0));
   ASSERT_LT(0, cache->GetUsage());
   delete iter;
   iter = nullptr;
@ -235,7 +236,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
   // Load blocks into cache.
   for (size_t i = 0; i + 1 < kNumBlocks; i++) {
     iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
+    iter->Seek(std::to_string(i));
     ASSERT_OK(iter->status());
     CheckCacheCounters(options, 1, 0, 1, 0);
     iterators[i].reset(iter);
@ -248,7 +249,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
   // Test with strict capacity limit.
   cache->SetStrictCapacityLimit(true);
   iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(kNumBlocks - 1));
+  iter->Seek(std::to_string(kNumBlocks - 1));
   ASSERT_TRUE(iter->status().IsIncomplete());
   CheckCacheCounters(options, 1, 0, 0, 1);
   delete iter;
@ -262,7 +263,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
   ASSERT_EQ(0, cache->GetPinnedUsage());
   for (size_t i = 0; i + 1 < kNumBlocks; i++) {
     iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
+    iter->Seek(std::to_string(i));
     ASSERT_OK(iter->status());
     CheckCacheCounters(options, 0, 1, 0, 0);
     iterators[i].reset(iter);
@ -288,7 +289,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
 
   std::string value(kValueSize, 'a');
   for (size_t i = 0; i < kNumBlocks; i++) {
-    ASSERT_OK(Put(ToString(i), value));
+    ASSERT_OK(Put(std::to_string(i), value));
     ASSERT_OK(Flush());
   }
 
@ -312,7 +313,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
 
   // Load blocks into cache.
   for (size_t i = 0; i < kNumBlocks - 1; i++) {
-    ASSERT_EQ(value, Get(ToString(i)));
+    ASSERT_EQ(value, Get(std::to_string(i)));
     CheckCacheCounters(options, 1, 0, 1, 0);
     CheckCompressedCacheCounters(options, 1, 0, 1, 0);
   }
@ -333,7 +334,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
 
   // Load last key block.
   ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.",
-            Get(ToString(kNumBlocks - 1)));
+            Get(std::to_string(kNumBlocks - 1)));
   // Failure will also record the miss counter.
   CheckCacheCounters(options, 1, 0, 0, 1);
   CheckCompressedCacheCounters(options, 1, 0, 1, 0);
@ -342,7 +343,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
   // cache and load into block cache.
   cache->SetStrictCapacityLimit(false);
   // Load last key block.
-  ASSERT_EQ(value, Get(ToString(kNumBlocks - 1)));
+  ASSERT_EQ(value, Get(std::to_string(kNumBlocks - 1)));
   CheckCacheCounters(options, 1, 0, 1, 0);
   CheckCompressedCacheCounters(options, 0, 1, 0, 0);
 }
@ -567,7 +568,7 @@ TEST_F(DBBlockCacheTest, FillCacheAndIterateDB) {
   Iterator* iter = nullptr;
 
   iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(0));
+  iter->Seek(std::to_string(0));
   while (iter->Valid()) {
     iter->Next();
   }
@ -645,10 +646,10 @@ TEST_F(DBBlockCacheTest, WarmCacheWithDataBlocksDuringFlush) {
 
   std::string value(kValueSize, 'a');
   for (size_t i = 1; i <= kNumBlocks; i++) {
-    ASSERT_OK(Put(ToString(i), value));
+    ASSERT_OK(Put(std::to_string(i), value));
     ASSERT_OK(Flush());
     ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
-    ASSERT_EQ(value, Get(ToString(i)));
+    ASSERT_EQ(value, Get(std::to_string(i)));
     ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS));
     ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT));
   }
@ -705,7 +706,7 @@ TEST_P(DBBlockCacheTest1, WarmCacheWithBlocksDuringFlush) {
 
   std::string value(kValueSize, 'a');
   for (size_t i = 1; i <= kNumBlocks; i++) {
-    ASSERT_OK(Put(ToString(i), value));
+    ASSERT_OK(Put(std::to_string(i), value));
     ASSERT_OK(Flush());
     ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD));
     if (filter_type == 1) {
@ -717,7 +718,7 @@ TEST_P(DBBlockCacheTest1, WarmCacheWithBlocksDuringFlush) {
       ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD));
       ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
     }
-    ASSERT_EQ(value, Get(ToString(i)));
+    ASSERT_EQ(value, Get(std::to_string(i)));
 
     ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS));
     ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT));
@ -772,12 +773,12 @@ TEST_F(DBBlockCacheTest, DynamicallyWarmCacheDuringFlush) {
   std::string value(kValueSize, 'a');
 
   for (size_t i = 1; i <= 5; i++) {
-    ASSERT_OK(Put(ToString(i), value));
+    ASSERT_OK(Put(std::to_string(i), value));
     ASSERT_OK(Flush());
     ASSERT_EQ(1,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
 
-    ASSERT_EQ(value, Get(ToString(i)));
+    ASSERT_EQ(value, Get(std::to_string(i)));
     ASSERT_EQ(0,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
     ASSERT_EQ(
@ -790,12 +791,12 @@ TEST_F(DBBlockCacheTest, DynamicallyWarmCacheDuringFlush) {
       {{"block_based_table_factory", "{prepopulate_block_cache=kDisable;}"}}));
 
   for (size_t i = 6; i <= kNumBlocks; i++) {
-    ASSERT_OK(Put(ToString(i), value));
+    ASSERT_OK(Put(std::to_string(i), value));
     ASSERT_OK(Flush());
     ASSERT_EQ(0,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
 
-    ASSERT_EQ(value, Get(ToString(i)));
+    ASSERT_EQ(value, Get(std::to_string(i)));
     ASSERT_EQ(1,
              options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD));
     ASSERT_EQ(
@ -934,7 +935,8 @@ TEST_F(DBBlockCacheTest, AddRedundantStats) {
   int iterations_tested = 0;
   for (std::shared_ptr<Cache> base_cache :
        {NewLRUCache(capacity, num_shard_bits),
-        NewClockCache(capacity, num_shard_bits)}) {
+        NewClockCache(capacity, num_shard_bits),
+        NewFastLRUCache(capacity, num_shard_bits)}) {
     if (!base_cache) {
       // Skip clock cache when not supported
       continue;
@ -1288,7 +1290,8 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
   int iterations_tested = 0;
   for (bool partition : {false, true}) {
     for (std::shared_ptr<Cache> cache :
-         {NewLRUCache(capacity), NewClockCache(capacity)}) {
+         {NewLRUCache(capacity), NewClockCache(capacity),
+          NewFastLRUCache(capacity)}) {
       if (!cache) {
         // Skip clock cache when not supported
         continue;
@ -1404,21 +1407,11 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
     ASSERT_TRUE(
         db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
 
-    EXPECT_EQ(
-        ToString(expected[static_cast<size_t>(CacheEntryRole::kIndexBlock)]),
-        values["count.index-block"]);
-    EXPECT_EQ(
-        ToString(expected[static_cast<size_t>(CacheEntryRole::kDataBlock)]),
-        values["count.data-block"]);
-    EXPECT_EQ(
-        ToString(expected[static_cast<size_t>(CacheEntryRole::kFilterBlock)]),
-        values["count.filter-block"]);
-    EXPECT_EQ(
-        ToString(
-            prev_expected[static_cast<size_t>(CacheEntryRole::kWriteBuffer)]),
-        values["count.write-buffer"]);
-    EXPECT_EQ(ToString(expected[static_cast<size_t>(CacheEntryRole::kMisc)]),
-              values["count.misc"]);
+    for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
+      auto role = static_cast<CacheEntryRole>(i);
+      EXPECT_EQ(std::to_string(expected[i]),
+                values[BlockCacheEntryStatsMapKeys::EntryCount(role)]);
+    }
 
     // Add one for kWriteBuffer
     {
@ -1429,18 +1422,20 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
       // re-scanning stats, but not totally aggressive.
       // Within some time window, we will get cached entry stats
       env_->MockSleepForSeconds(1);
-      EXPECT_EQ(ToString(prev_expected[static_cast<size_t>(
+      EXPECT_EQ(std::to_string(prev_expected[static_cast<size_t>(
                     CacheEntryRole::kWriteBuffer)]),
-                values["count.write-buffer"]);
+                values[BlockCacheEntryStatsMapKeys::EntryCount(
+                    CacheEntryRole::kWriteBuffer)]);
       // Not enough for a "background" miss but enough for a "foreground" miss
       env_->MockSleepForSeconds(45);
 
      ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats,
                                      &values));
      EXPECT_EQ(
-          ToString(
+          std::to_string(
              expected[static_cast<size_t>(CacheEntryRole::kWriteBuffer)]),
-          values["count.write-buffer"]);
+          values[BlockCacheEntryStatsMapKeys::EntryCount(
+              CacheEntryRole::kWriteBuffer)]);
     }
     prev_expected = expected;
 
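The rewritten assertions derive the map keys via BlockCacheEntryStatsMapKeys::EntryCount instead of hard-coded strings like "count.write-buffer". A usage sketch; the header placement is my assumption and may vary by RocksDB version:

#include <cstdio>
#include <map>
#include <string>
#include "rocksdb/cache.h"  // CacheEntryRole, BlockCacheEntryStatsMapKeys
#include "rocksdb/db.h"

using namespace ROCKSDB_NAMESPACE;

// Dump per-role block cache entry counts through the stable key helper.
void DumpBlockCacheEntryCounts(DB* db) {
  std::map<std::string, std::string> values;
  if (!db->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values)) {
    return;  // property unavailable (e.g., no block cache configured)
  }
  for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
    auto role = static_cast<CacheEntryRole>(i);
    std::printf("role %zu count=%s\n", i,
                values[BlockCacheEntryStatsMapKeys::EntryCount(role)].c_str());
  }
}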
@@ -1645,7 +1640,7 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
     SstFileWriter sst_file_writer(EnvOptions(), options);
     std::vector<std::string> external;
     for (int i = 0; i < 2; ++i) {
-      std::string f = dbname_ + "/external" + ToString(i) + ".sst";
+      std::string f = dbname_ + "/external" + std::to_string(i) + ".sst";
       external.push_back(f);
       ASSERT_OK(sst_file_writer.Open(f));
       ASSERT_OK(sst_file_writer.Put(Key(key_count), "abc"));
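This is the first of a long run of hunks making the same mechanical substitution: RocksDB's pre-C++11 ToString() shim gives way to std::to_string from the standard library. The rule, in isolation (names here are illustrative):

    #include <string>

    // Before: std::string f = prefix + ToString(i) + ".sst";   (RocksDB shim)
    // After: the standard-library equivalent, which all supported
    // toolchains now provide.
    std::string MakeName(const std::string& prefix, int i) {
      return prefix + std::to_string(i) + ".sst";
    }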
@@ -1729,7 +1724,7 @@ class CacheKeyTest : public testing::Test {
     // Like SemiStructuredUniqueIdGen::GenerateNext
     tp_.db_session_id = EncodeSessionId(base_session_upper_,
                                         base_session_lower_ ^ session_counter_);
-    tp_.db_id = ToString(db_id_);
+    tp_.db_id = std::to_string(db_id_);
     tp_.orig_file_number = file_number_;
     bool is_stable;
     std::string cur_session_id = "";  // ignored

@@ -111,6 +111,7 @@ TEST_P(DBBloomFilterTestDefFormatVersion, KeyMayExist) {
       options_override.filter_policy = Create(20, bfp_impl_);
       options_override.partition_filters = partition_filters_;
       options_override.metadata_block_size = 32;
+      options_override.full_block_cache = true;
       Options options = CurrentOptions(options_override);
       if (partition_filters_) {
         auto* table_options =
@@ -857,7 +858,7 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) {
     options.table_factory.reset(NewBlockBasedTableFactory(table_options));
     Reopen(options);

-    std::string prefix = ToString(i) + "_";
+    std::string prefix = std::to_string(i) + "_";
     ASSERT_OK(Put(prefix + "A", "val"));
     ASSERT_OK(Put(prefix + "Z", "val"));
     ASSERT_OK(Flush());
@@ -872,7 +873,7 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) {
     options.table_factory.reset(NewBlockBasedTableFactory(table_options));
     Reopen(options);
     for (size_t j = 0; j < kCompatibilityConfigs.size(); ++j) {
-      std::string prefix = ToString(j) + "_";
+      std::string prefix = std::to_string(j) + "_";
       ASSERT_EQ("val", Get(prefix + "A"));  // Filter positive
       ASSERT_EQ("val", Get(prefix + "Z"));  // Filter positive
       // Filter negative, with high probability
@@ -1074,7 +1075,8 @@ class DBFilterConstructionReserveMemoryTestWithParam
 };

 INSTANTIATE_TEST_CASE_P(
-    BlockBasedTableOptions, DBFilterConstructionReserveMemoryTestWithParam,
+    DBFilterConstructionReserveMemoryTestWithParam,
+    DBFilterConstructionReserveMemoryTestWithParam,
     ::testing::Values(std::make_tuple(false, kFastLocalBloom, false, false),

                       std::make_tuple(true, kFastLocalBloom, false, false),
@@ -1090,7 +1092,7 @@ INSTANTIATE_TEST_CASE_P(
                       std::make_tuple(true, kDeprecatedBlock, false, false),
                       std::make_tuple(true, kLegacyBloom, false, false)));

-// TODO: Speed up this test.
+// TODO: Speed up this test, and reduce disk space usage (~700MB)
 // The current test inserts many keys (on the scale of dummy entry size)
 // in order to make small memory user (e.g, final filter, partitioned hash
 // entries/filter/banding) , which is proportional to the number of
@@ -1711,11 +1713,11 @@ class TestingContextCustomFilterPolicy
     test_report_ +=
         OptionsHelper::compaction_style_to_string[context.compaction_style];
     test_report_ += ",n=";
-    test_report_ += ROCKSDB_NAMESPACE::ToString(context.num_levels);
+    test_report_ += std::to_string(context.num_levels);
     test_report_ += ",l=";
-    test_report_ += ROCKSDB_NAMESPACE::ToString(context.level_at_creation);
+    test_report_ += std::to_string(context.level_at_creation);
     test_report_ += ",b=";
-    test_report_ += ROCKSDB_NAMESPACE::ToString(int{context.is_bottommost});
+    test_report_ += std::to_string(int{context.is_bottommost});
     test_report_ += ",r=";
     test_report_ += table_file_creation_reason_to_string[context.reason];
     test_report_ += "\n";
@@ -454,7 +454,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
   // put some data
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
     }
     ASSERT_OK(Flush());
   }
@@ -755,7 +755,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
 #ifndef ROCKSDB_LITE
 // Compaction filters aplies to all records, regardless snapshots.
 TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
-  std::string five = ToString(5);
+  std::string five = std::to_string(5);
   Options options = CurrentOptions();
   options.compaction_filter_factory = std::make_shared<DeleteISFilterFactory>();
   options.disable_auto_compactions = true;
@@ -766,7 +766,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   const Snapshot* snapshot = nullptr;
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10; ++i) {
-      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
     }
     ASSERT_OK(Flush());

@@ -968,6 +968,71 @@ TEST_F(DBTestCompactionFilter, IgnoreSnapshotsFalseRecovery) {
   ASSERT_TRUE(TryReopen(options).IsNotSupported());
 }

+TEST_F(DBTestCompactionFilter, DropKeyWithSingleDelete) {
+  Options options = GetDefaultOptions();
+  options.create_if_missing = true;
+
+  Reopen(options);
+
+  ASSERT_OK(Put("a", "v0"));
+  ASSERT_OK(Put("b", "v0"));
+  const Snapshot* snapshot = db_->GetSnapshot();
+
+  ASSERT_OK(SingleDelete("b"));
+  ASSERT_OK(Flush());
+
+  {
+    CompactRangeOptions cro;
+    cro.change_level = true;
+    cro.target_level = options.num_levels - 1;
+    ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
+  }
+
+  db_->ReleaseSnapshot(snapshot);
+  Close();
+
+  class DeleteFilterV2 : public CompactionFilter {
+   public:
+    Decision FilterV2(int /*level*/, const Slice& key, ValueType /*value_type*/,
+                      const Slice& /*existing_value*/,
+                      std::string* /*new_value*/,
+                      std::string* /*skip_until*/) const override {
+      if (key.starts_with("b")) {
+        return Decision::kPurge;
+      }
+      return Decision::kRemove;
+    }
+
+    const char* Name() const override { return "DeleteFilterV2"; }
+  } delete_filter_v2;
+
+  options.compaction_filter = &delete_filter_v2;
+  options.level0_file_num_compaction_trigger = 2;
+  Reopen(options);
+
+  ASSERT_OK(Put("b", "v1"));
+  ASSERT_OK(Put("x", "v1"));
+  ASSERT_OK(Flush());
+
+  ASSERT_OK(Put("r", "v1"));
+  ASSERT_OK(Put("z", "v1"));
+  ASSERT_OK(Flush());
+
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
+
+  Close();
+
+  options.compaction_filter = nullptr;
+  Reopen(options);
+  ASSERT_OK(SingleDelete("b"));
+  ASSERT_OK(Flush());
+  {
+    CompactRangeOptions cro;
+    cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
+    ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE

 int main(int argc, char** argv) {
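The new DropKeyWithSingleDelete test is the interesting part here: the filter answers kPurge rather than kRemove for keys that may also be covered by a SingleDelete, which (as the test implies) keeps SingleDelete's one-delete-per-put contract intact when a compaction filter drops such keys. A standalone sketch of that shape — the class name and prefix are illustrative, and the semantics described are inferred from the test rather than from API docs:

    #include <string>

    #include "rocksdb/compaction_filter.h"
    #include "rocksdb/slice.h"

    // Sketch: purge everything under a prefix; keys that might later receive
    // a SingleDelete are dropped with kPurge instead of kRemove.
    class PrefixPurgeFilter : public rocksdb::CompactionFilter {
     public:
      Decision FilterV2(int /*level*/, const rocksdb::Slice& key,
                        ValueType /*value_type*/,
                        const rocksdb::Slice& /*existing_value*/,
                        std::string* /*new_value*/,
                        std::string* /*skip_until*/) const override {
        return key.starts_with("tmp_") ? Decision::kPurge : Decision::kKeep;
      }
      const char* Name() const override { return "PrefixPurgeFilter"; }
    };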
@@ -2409,6 +2409,30 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionCFPathUse) {

   check_getvalues();

+  {  // Also verify GetLiveFilesStorageInfo with db_paths / cf_paths
+    std::vector<LiveFileStorageInfo> new_infos;
+    LiveFilesStorageInfoOptions lfsio;
+    lfsio.wal_size_for_flush = UINT64_MAX;  // no flush
+    ASSERT_OK(db_->GetLiveFilesStorageInfo(lfsio, &new_infos));
+    std::unordered_map<std::string, int> live_sst_by_dir;
+    for (auto& info : new_infos) {
+      if (info.file_type == kTableFile) {
+        live_sst_by_dir[info.directory]++;
+        // Verify file on disk (no directory confusion)
+        uint64_t size;
+        ASSERT_OK(env_->GetFileSize(
+            info.directory + "/" + info.relative_filename, &size));
+        ASSERT_EQ(info.size, size);
+      }
+    }
+    ASSERT_EQ(3U * 3U, live_sst_by_dir.size());
+    for (auto& paths : {options.db_paths, cf_opt1.cf_paths, cf_opt2.cf_paths}) {
+      ASSERT_EQ(1, live_sst_by_dir[paths[0].path]);
+      ASSERT_EQ(4, live_sst_by_dir[paths[1].path]);
+      ASSERT_EQ(2, live_sst_by_dir[paths[2].path]);
+    }
+  }
+
   ReopenWithColumnFamilies({"default", "one", "two"}, option_vector);

   check_getvalues();
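GetLiveFilesStorageInfo, which the added block exercises, reports each live file together with the directory it actually resides in — the point of the assertions once db_paths/cf_paths split files across directories. A hedged caller-side sketch (assumes an open DB; the struct and option names come from the test itself):

    #include <cinttypes>
    #include <cstdio>
    #include <limits>
    #include <vector>

    #include "rocksdb/db.h"

    // Sketch: list every live file and its size without forcing a flush first.
    rocksdb::Status ListLiveFiles(rocksdb::DB* db) {
      rocksdb::LiveFilesStorageInfoOptions opts;
      opts.wal_size_for_flush = std::numeric_limits<uint64_t>::max();  // no flush
      std::vector<rocksdb::LiveFileStorageInfo> infos;
      rocksdb::Status s = db->GetLiveFilesStorageInfo(opts, &infos);
      if (!s.ok()) {
        return s;
      }
      for (const auto& info : infos) {
        std::printf("%s/%s (%" PRIu64 " bytes)\n", info.directory.c_str(),
                    info.relative_filename.c_str(), info.size);
      }
      return s;
    }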
@@ -2793,7 +2817,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) {

   Random rnd(301);
   for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize)));
+    ASSERT_OK(Put(1, std::to_string(key), rnd.RandomString(kTestValueSize)));
   }
   ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -2825,7 +2849,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) {

   // make sure all key-values are still there.
   for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_NE(Get(1, ToString(key)), "NOT_FOUND");
+    ASSERT_NE(Get(1, std::to_string(key)), "NOT_FOUND");
   }
 }

@@ -4380,7 +4404,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
   for (CompactionFilterType comp_filter_type :
        {kUseCompactionFilter, kUseCompactionFilterFactory}) {
     // Assert that periodic compactions are not enabled.
-    ASSERT_EQ(port::kMaxUint64 - 1, options.periodic_compaction_seconds);
+    ASSERT_EQ(std::numeric_limits<uint64_t>::max() - 1,
+              options.periodic_compaction_seconds);

     if (comp_filter_type == kUseCompactionFilter) {
       options.compaction_filter = &test_compaction_filter;
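Alongside the std::to_string change, this hunk begins the diff's other recurring substitution: port::kMaxUint64 becomes std::numeric_limits<uint64_t>::max(). The rule, in isolation:

    #include <cstdint>
    #include <limits>

    // Before: uint64_t sentinel = port::kMaxUint64;   (RocksDB portability shim)
    // After: the standard, constexpr spelling of the same value.
    constexpr uint64_t kSentinel = std::numeric_limits<uint64_t>::max();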
@@ -4643,9 +4668,9 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
   });

   TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush");
-  ASSERT_OK(Put(ToString(0), rnd.RandomString(1024)));
+  ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024)));
   ASSERT_OK(dbfull()->Flush(flush_opts));
-  ASSERT_OK(Put(ToString(0), rnd.RandomString(1024)));
+  ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024)));
   TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush");
   manual_compaction_thread.join();

@@ -4654,7 +4679,7 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
   std::string num_keys_in_memtable;
   ASSERT_TRUE(db_->GetProperty(DB::Properties::kNumEntriesActiveMemTable,
                                &num_keys_in_memtable));
-  ASSERT_EQ(ToString(1), num_keys_in_memtable);
+  ASSERT_EQ(std::to_string(1), num_keys_in_memtable);

   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
 }
@@ -4803,7 +4828,7 @@ TEST_F(DBCompactionTest, SubcompactionEvent) {
   for (int i = 0; i < 4; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 10 + j;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@@ -4813,7 +4838,7 @@ TEST_F(DBCompactionTest, SubcompactionEvent) {
   for (int i = 0; i < 2; i++) {
     for (int j = 0; j < 10; j++) {
       int key_id = i * 20 + j * 2;
-      ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id)));
+      ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id)));
     }
     ASSERT_OK(Flush());
   }
@@ -5805,7 +5830,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) {
   }
   ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());

-  ASSERT_EQ(ToString(kSstNum), FilesPerLevel(0));
+  ASSERT_EQ(std::to_string(kSstNum), FilesPerLevel(0));

   auto cro = CompactRangeOptions();
   cro.bottommost_level_compaction = bottommost_level_compaction_;
@@ -5818,7 +5843,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) {
     ASSERT_EQ("0,1", FilesPerLevel(0));
   } else {
     // Just trivial move from level 0 -> 1
-    ASSERT_EQ("0," + ToString(kSstNum), FilesPerLevel(0));
+    ASSERT_EQ("0," + std::to_string(kSstNum), FilesPerLevel(0));
   }
 }

@@ -6484,20 +6509,29 @@ TEST_F(DBCompactionTest, CompactionWithBlobGCError_CorruptIndex) {
   ASSERT_OK(Put(third_key, third_value));

   constexpr char fourth_key[] = "fourth_key";
-  constexpr char corrupt_blob_index[] = "foobar";
-
-  WriteBatch batch;
-  ASSERT_OK(WriteBatchInternal::PutBlobIndex(&batch, 0, fourth_key,
-                                             corrupt_blob_index));
-  ASSERT_OK(db_->Write(WriteOptions(), &batch));
+  constexpr char fourth_value[] = "fourth_value";
+  ASSERT_OK(Put(fourth_key, fourth_value));

   ASSERT_OK(Flush());

+  SyncPoint::GetInstance()->SetCallBack(
+      "CompactionIterator::GarbageCollectBlobIfNeeded::TamperWithBlobIndex",
+      [](void* arg) {
+        Slice* const blob_index = static_cast<Slice*>(arg);
+        assert(blob_index);
+        assert(!blob_index->empty());
+        blob_index->remove_prefix(1);
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
   constexpr Slice* begin = nullptr;
   constexpr Slice* end = nullptr;

   ASSERT_TRUE(
       db_->CompactRange(CompactRangeOptions(), begin, end).IsCorruption());
+
+  SyncPoint::GetInstance()->DisableProcessing();
+  SyncPoint::GetInstance()->ClearAllCallBacks();
 }

 TEST_F(DBCompactionTest, CompactionWithBlobGCError_InlinedTTLIndex) {
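The corrupt-blob-index test now injects the corruption at compaction time through RocksDB's test-only sync points instead of writing a bogus blob index up front. The general shape of that tooling, as used above (sync points are compiled out of release builds; the header path is internal, not public API):

    #include <cassert>
    #include <functional>

    #include "rocksdb/slice.h"
    #include "test_util/sync_point.h"  // internal test utility

    using ROCKSDB_NAMESPACE::SyncPoint;

    // Sketch: tamper with the argument passed at a named code site, run the
    // scenario, then restore the hooks.
    void WithTamperedBlobIndex(const std::function<void()>& scenario) {
      SyncPoint::GetInstance()->SetCallBack(
          "CompactionIterator::GarbageCollectBlobIfNeeded::TamperWithBlobIndex",
          [](void* arg) {
            auto* blob_index = static_cast<ROCKSDB_NAMESPACE::Slice*>(arg);
            assert(blob_index != nullptr);
            blob_index->remove_prefix(1);  // make the index undecodable
          });
      SyncPoint::GetInstance()->EnableProcessing();
      scenario();  // expected to fail with Status::Corruption
      SyncPoint::GetInstance()->DisableProcessing();
      SyncPoint::GetInstance()->ClearAllCallBacks();
    }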
@@ -7140,7 +7174,7 @@ TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFull) {
     ASSERT_OK(Put(Key(2), "value2"));
     ASSERT_OK(Flush());
   }
-  ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));
+  ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));

   db_->DisableManualCompaction();

@@ -7197,7 +7231,7 @@ TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFullDBClose) {
     ASSERT_OK(Put(Key(2), "value2"));
     ASSERT_OK(Flush());
   }
-  ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));
+  ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));

   db_->DisableManualCompaction();

@@ -7257,7 +7291,7 @@ TEST_F(DBCompactionTest, DBCloseWithManualCompaction) {
     ASSERT_OK(Put(Key(2), "value2"));
     ASSERT_OK(Flush());
   }
-  ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));
+  ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));

   // Close DB with manual compaction and auto triggered compaction in the queue.
   auto s = db_->Close();

@@ -177,7 +177,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   VectorLogPtr live_wal_files;
   bool flush_memtable = true;
   if (!immutable_db_options_.allow_2pc) {
-    if (opts.wal_size_for_flush == port::kMaxUint64) {
+    if (opts.wal_size_for_flush == std::numeric_limits<uint64_t>::max()) {
       flush_memtable = false;
     } else if (opts.wal_size_for_flush > 0) {
       // If the outstanding log files are small, we skip the flush.

@@ -2356,7 +2356,7 @@ TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
   ASSERT_OK(Flush(cf_ids));
   uint64_t log_num_after_flush = dbfull()->TEST_GetCurrentLogNumber();

-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   autovector<ColumnFamilyData*> flushed_cfds;
   autovector<autovector<VersionEdit*>> flush_edits;
   for (size_t i = 0; i != num_cfs; ++i) {

@@ -101,6 +101,7 @@
 #include "util/compression.h"
 #include "util/crc32c.h"
 #include "util/defer.h"
+#include "util/hash_containers.h"
 #include "util/mutexlock.h"
 #include "util/stop_watch.h"
 #include "util/string_util.h"
@@ -566,7 +567,7 @@ Status DBImpl::CloseHelper() {
       // flushing by first checking if there is a need for
       // flushing (but need to implement something
       // else than imm()->IsFlushPending() because the output
-      // memtables added to imm() dont trigger flushes).
+      // memtables added to imm() don't trigger flushes).
       if (immutable_db_options_.experimental_mempurge_threshold > 0.0) {
         Status flush_ret;
         mutex_.Unlock();
@@ -848,7 +849,8 @@ void DBImpl::PersistStats() {
       if (stats_slice_.find(stat.first) != stats_slice_.end()) {
         uint64_t delta = stat.second - stats_slice_[stat.first];
         s = batch.Put(persist_stats_cf_handle_,
-                      Slice(key, std::min(100, length)), ToString(delta));
+                      Slice(key, std::min(100, length)),
+                      std::to_string(delta));
       }
     }
   }
@@ -2000,7 +2002,7 @@ std::vector<Status> DBImpl::MultiGet(

   SequenceNumber consistent_seqnum;

-  std::unordered_map<uint32_t, MultiGetColumnFamilyData> multiget_cf_data(
+  UnorderedMap<uint32_t, MultiGetColumnFamilyData> multiget_cf_data(
       column_family.size());
   for (auto cf : column_family) {
     auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(cf);
@@ -2012,13 +2014,13 @@ std::vector<Status> DBImpl::MultiGet(
   }

   std::function<MultiGetColumnFamilyData*(
-      std::unordered_map<uint32_t, MultiGetColumnFamilyData>::iterator&)>
+      UnorderedMap<uint32_t, MultiGetColumnFamilyData>::iterator&)>
       iter_deref_lambda =
-          [](std::unordered_map<uint32_t, MultiGetColumnFamilyData>::iterator&
+          [](UnorderedMap<uint32_t, MultiGetColumnFamilyData>::iterator&
                  cf_iter) { return &cf_iter->second; };

   bool unref_only =
-      MultiCFSnapshot<std::unordered_map<uint32_t, MultiGetColumnFamilyData>>(
+      MultiCFSnapshot<UnorderedMap<uint32_t, MultiGetColumnFamilyData>>(
           read_options, nullptr, iter_deref_lambda, &multiget_cf_data,
          &consistent_seqnum);

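The MultiGet hunks swap std::unordered_map for an UnorderedMap alias pulled in by the new util/hash_containers.h include. My reading — an assumption, since the header itself isn't shown in this diff — is that the alias picks a faster hash table when the build has one and falls back to the standard container otherwise, roughly:

    #include <unordered_map>

    #ifdef USE_FOLLY
    #include <folly/container/F14Map.h>
    // Assumption: folly builds get the cache-friendly F14 map.
    template <typename K, typename V>
    using UnorderedMap = folly::F14FastMap<K, V>;
    #else
    // Portable fallback with identical call sites.
    template <typename K, typename V>
    using UnorderedMap = std::unordered_map<K, V>;
    #endif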
@@ -3354,7 +3356,7 @@ bool DBImpl::GetProperty(ColumnFamilyHandle* column_family,
     bool ret_value =
         GetIntPropertyInternal(cfd, *property_info, false, &int_value);
     if (ret_value) {
-      *value = ToString(int_value);
+      *value = std::to_string(int_value);
     }
     return ret_value;
   } else if (property_info->handle_string) {
@@ -3681,6 +3683,11 @@ Status DBImpl::GetUpdatesSince(
     SequenceNumber seq, std::unique_ptr<TransactionLogIterator>* iter,
     const TransactionLogIterator::ReadOptions& read_options) {
   RecordTick(stats_, GET_UPDATES_SINCE_CALLS);
+  if (seq_per_batch_) {
+    return Status::NotSupported(
+        "This API is not yet compatible with write-prepared/write-unprepared "
+        "transactions");
+  }
   if (seq > versions_->LastSequence()) {
     return Status::NotFound("Requested sequence not yet written in the db");
   }
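GetUpdatesSince now fails fast on DBs running in seq_per_batch mode (write-prepared / write-unprepared transactions), where WAL sequence numbers don't map one-to-one onto visible writes. Caller-side, nothing changes beyond a new NotSupported case to handle:

    #include <memory>

    #include "rocksdb/db.h"
    #include "rocksdb/transaction_log.h"

    // Sketch: tail the WAL from `seq`, tolerating the new NotSupported result.
    rocksdb::Status TailWal(rocksdb::DB* db, rocksdb::SequenceNumber seq) {
      std::unique_ptr<rocksdb::TransactionLogIterator> it;
      rocksdb::Status s = db->GetUpdatesSince(seq, &it);
      if (!s.ok()) {
        return s;  // includes the new NotSupported case
      }
      while (it->Valid()) {
        rocksdb::BatchResult batch = it->GetBatch();
        // ... consume batch.sequence / batch.writeBatchPtr ...
        it->Next();
      }
      return it->status();
    }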
@@ -3984,8 +3991,8 @@ Status DBImpl::CheckConsistency() {
       } else if (fsize != md.size) {
         corruption_messages += "Sst file size mismatch: " + file_path +
                                ". Size recorded in manifest " +
-                               ToString(md.size) + ", actual size " +
-                               ToString(fsize) + "\n";
+                               std::to_string(md.size) + ", actual size " +
+                               std::to_string(fsize) + "\n";
       }
     }
   }
@@ -5117,8 +5124,8 @@ Status DBImpl::VerifyChecksumInternal(const ReadOptions& read_options,
                                      fmeta->file_checksum_func_name, fname,
                                      read_options);
       } else {
-        s = ROCKSDB_NAMESPACE::VerifySstFileChecksum(opts, file_options_,
-                                                     read_options, fname);
+        s = ROCKSDB_NAMESPACE::VerifySstFileChecksum(
+            opts, file_options_, read_options, fname, fd.largest_seqno);
       }
       RecordTick(stats_, VERIFY_CHECKSUM_READ_BYTES,
                  IOSTATS(bytes_read) - prev_bytes_read);
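The second VerifyChecksum path now passes the file's largest sequence number to VerifySstFileChecksum. My understanding — an assumption, since the diff shows only the extra argument — is that this lets the reader correctly handle files carrying a global sequence number, such as ingested SSTs. The free-function call shape used here, as a sketch:

    #include <string>

    #include "rocksdb/convenience.h"  // assumed declaration site
    #include "rocksdb/env.h"
    #include "rocksdb/options.h"

    // Sketch: verify one SST file's checksums, supplying largest_seqno from
    // the file's metadata.
    rocksdb::Status VerifyOneSst(const rocksdb::Options& options,
                                 const std::string& fname,
                                 rocksdb::SequenceNumber largest_seqno) {
      return rocksdb::VerifySstFileChecksum(options, rocksdb::EnvOptions(),
                                            rocksdb::ReadOptions(), fname,
                                            largest_seqno);
    }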
@@ -5332,7 +5339,7 @@ Status DBImpl::ReserveFileNumbersBeforeIngestion(

 Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
-    uint64_t oldest_time = port::kMaxUint64;
+    uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *versions_->GetColumnFamilySet()) {
       if (!cfd->IsDropped()) {
         uint64_t ctime;

@@ -2299,7 +2299,7 @@ class DBImpl : public DB {

   static const int KEEP_LOG_FILE_NUM = 1000;
   // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
+  static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();

   std::string db_absolute_path_;

@@ -2369,11 +2369,6 @@ class DBImpl : public DB {
   // DB::Open() or passed to us
   bool own_sfm_;

-  // Default value is 0 which means ALL deletes are
-  // preserved. Note that this has no effect if preserve_deletes is false.
-  const std::atomic<SequenceNumber> preserve_deletes_seqnum_{0};
-  const bool preserve_deletes_ = false;
-
   // Flag to check whether Close() has been called on this DB
   bool closed_;
   // save the closing status, for re-calling the close()

@@ -188,7 +188,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   // a memtable without knowing such snapshot(s).
   uint64_t max_memtable_id = needs_to_sync_closed_wals
                                  ? cfd->imm()->GetLatestMemTableID()
-                                 : port::kMaxUint64;
+                                 : std::numeric_limits<uint64_t>::max();

   // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
   // existing memtables of the column family when PickMemTable() is called
@@ -1041,7 +1041,8 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     }
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
                             final_output_level, options, begin, end, exclusive,
-                            false, port::kMaxUint64, trim_ts);
+                            false, std::numeric_limits<uint64_t>::max(),
+                            trim_ts);
   } else {
     int first_overlapped_level = kInvalidLevel;
     int max_overlapped_level = kInvalidLevel;
@@ -1078,7 +1079,7 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     if (s.ok() && first_overlapped_level != kInvalidLevel) {
       // max_file_num_to_ignore can be used to filter out newly created SST
       // files, useful for bottom level compaction in a manual compaction
-      uint64_t max_file_num_to_ignore = port::kMaxUint64;
+      uint64_t max_file_num_to_ignore = std::numeric_limits<uint64_t>::max();
       uint64_t next_file_number = versions_->current_next_file_number();
       final_output_level = max_overlapped_level;
       int output_level;
@@ -1363,11 +1364,11 @@ Status DBImpl::CompactFilesImpl(
   CompactionJob compaction_job(
       job_context->job_id, c.get(), immutable_db_options_, mutable_db_options_,
       file_options_for_compaction_, versions_.get(), &shutting_down_,
-      preserve_deletes_seqnum_.load(), log_buffer, directories_.GetDbDir(),
+      log_buffer, directories_.GetDbDir(),
       GetDataDir(c->column_family_data(), c->output_path_id()),
       GetDataDir(c->column_family_data(), 0), stats_, &mutex_, &error_handler_,
       snapshot_seqs, earliest_write_conflict_snapshot, snapshot_checker,
-      table_cache_, &event_logger_,
+      job_context, table_cache_, &event_logger_,
       c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
       &compaction_job_stats, Env::Priority::USER, io_tracer_,
@@ -2013,7 +2014,7 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
     // be created and scheduled, status::OK() will be returned.
     s = SwitchMemtable(cfd, &context);
   }
-  const uint64_t flush_memtable_id = port::kMaxUint64;
+  const uint64_t flush_memtable_id = std::numeric_limits<uint64_t>::max();
   if (s.ok()) {
     if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
         !cached_recoverable_state_empty_.load()) {
@@ -3357,12 +3358,11 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
     CompactionJob compaction_job(
         job_context->job_id, c.get(), immutable_db_options_,
         mutable_db_options_, file_options_for_compaction_, versions_.get(),
-        &shutting_down_, preserve_deletes_seqnum_.load(), log_buffer,
-        directories_.GetDbDir(),
+        &shutting_down_, log_buffer, directories_.GetDbDir(),
         GetDataDir(c->column_family_data(), c->output_path_id()),
         GetDataDir(c->column_family_data(), 0), stats_, &mutex_,
         &error_handler_, snapshot_seqs, earliest_write_conflict_snapshot,
-        snapshot_checker, table_cache_, &event_logger_,
+        snapshot_checker, job_context, table_cache_, &event_logger_,
         c->mutable_cf_options()->paranoid_file_checks,
         c->mutable_cf_options()->report_bg_io_stats, dbname_,
         &compaction_job_stats, thread_pri, io_tracer_,

@@ -118,10 +118,11 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
           cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
              ? level
              : level + 1;
-  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
-                             begin, end, true, disallow_trivial_move,
-                             port::kMaxUint64 /*max_file_num_to_ignore*/,
-                             "" /*trim_ts*/);
+  return RunManualCompaction(
+      cfd, level, output_level, CompactRangeOptions(), begin, end, true,
+      disallow_trivial_move,
+      std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
+      "" /*trim_ts*/);
 }

 Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {

@@ -761,7 +761,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
   assert(!cfds_to_flush.empty());
   assert(cfds_to_flush.size() == edit_lists.size());

-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   for (const auto& edit_list : edit_lists) {
     uint64_t log = 0;
     for (const auto& e : edit_list) {
@@ -773,7 +773,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
       min_log_number_to_keep = std::min(min_log_number_to_keep, log);
     }
   }
-  if (min_log_number_to_keep == port::kMaxUint64) {
+  if (min_log_number_to_keep == std::numeric_limits<uint64_t>::max()) {
     min_log_number_to_keep = cfds_to_flush[0]->GetLogNumber();
     for (size_t i = 1; i < cfds_to_flush.size(); i++) {
       min_log_number_to_keep =

@@ -760,11 +760,11 @@ Status DBImpl::PersistentStatsProcessFormatVersion() {
     WriteBatch batch;
     if (s.ok()) {
       s = batch.Put(persist_stats_cf_handle_, kFormatVersionKeyString,
-                    ToString(kStatsCFCurrentFormatVersion));
+                    std::to_string(kStatsCFCurrentFormatVersion));
     }
     if (s.ok()) {
       s = batch.Put(persist_stats_cf_handle_, kCompatibleVersionKeyString,
-                    ToString(kStatsCFCompatibleFormatVersion));
+                    std::to_string(kStatsCFCompatibleFormatVersion));
     }
     if (s.ok()) {
       WriteOptions wo;
@@ -947,7 +947,6 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& wal_numbers,
   // Read all the records and add to a memtable
   std::string scratch;
   Slice record;
-  WriteBatch batch;

   TEST_SYNC_POINT_CALLBACK("DBImpl::RecoverLogFiles:BeforeReadWal",
                            /*arg=*/nullptr);
@@ -961,10 +960,15 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& wal_numbers,
       continue;
     }

+    // We create a new batch and initialize with a valid prot_info_ to store
+    // the data checksums
+    WriteBatch batch(0, 0, 8, 0);
+
     status = WriteBatchInternal::SetContents(&batch, record);
     if (!status.ok()) {
       return status;
     }

     SequenceNumber sequence = WriteBatchInternal::Sequence(&batch);

     if (immutable_db_options_.wal_recovery_mode ==
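WAL recovery now builds its scratch WriteBatch per record, asking for 8 bytes of per-key protection so the integrity info (prot_info_) computed from the recovered contents travels with the batch. Reading the four constructor arguments as (reserved_bytes, max_bytes, protection_bytes_per_key, default_cf_ts_sz) — a mapping inferred from the diff's own comment, not verified against the header:

    #include "rocksdb/write_batch.h"

    // Sketch: a batch that keeps 8 bytes of integrity protection per entry.
    // The argument mapping below is an assumption based on this diff.
    rocksdb::WriteBatch MakeProtectedBatch() {
      return rocksdb::WriteBatch(/*reserved_bytes=*/0, /*max_bytes=*/0,
                                 /*protection_bytes_per_key=*/8,
                                 /*default_cf_ts_sz=*/0);
    }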
@@ -1322,6 +1326,7 @@ Status DBImpl::GetLogSizeAndMaybeTruncate(uint64_t wal_number, bool truncate,
   Status s;
   // This gets the appear size of the wals, not including preallocated space.
   s = env_->GetFileSize(fname, &log.size);
+  TEST_SYNC_POINT_CALLBACK("DBImpl::GetLogSizeAndMaybeTruncate:0", /*arg=*/&s);
   if (s.ok() && truncate) {
     std::unique_ptr<FSWritableFile> last_log;
     Status truncate_status = fs_->ReopenWritableFile(
@@ -1465,9 +1470,9 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
         dbname_, versions_.get(), immutable_db_options_, tboptions,
         file_options_for_compaction_, cfd->table_cache(), iter.get(),
         std::move(range_del_iters), &meta, &blob_file_additions,
-        snapshot_seqs, earliest_write_conflict_snapshot, snapshot_checker,
-        paranoid_file_checks, cfd->internal_stats(), &io_s, io_tracer_,
-        BlobFileCreationReason::kRecovery, &event_logger_, job_id,
+        snapshot_seqs, earliest_write_conflict_snapshot, kMaxSequenceNumber,
+        snapshot_checker, paranoid_file_checks, cfd->internal_stats(), &io_s,
+        io_tracer_, BlobFileCreationReason::kRecovery, &event_logger_, job_id,
         Env::IO_HIGH, nullptr /* table_properties */, write_hint,
         nullptr /*full_history_ts_low*/, &blob_callback_);
     LogFlush(immutable_db_options_.info_log);
@@ -1821,6 +1826,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
   if (s.ok()) {
     // Need to fsync, otherwise it might get lost after a power reset.
     s = impl->FlushWAL(false);
+    TEST_SYNC_POINT_CALLBACK("DBImpl::Open::BeforeSyncWAL", /*arg=*/&s);
     if (s.ok()) {
       s = log_writer->file()->Sync(impl->immutable_db_options_.use_fsync);
     }

@@ -247,15 +247,16 @@ Status DBImplSecondary::RecoverLogFiles(
         if (seq_of_batch <= seq) {
           continue;
         }
-        auto curr_log_num = port::kMaxUint64;
+        auto curr_log_num = std::numeric_limits<uint64_t>::max();
         if (cfd_to_current_log_.count(cfd) > 0) {
           curr_log_num = cfd_to_current_log_[cfd];
         }
         // If the active memtable contains records added by replaying an
         // earlier WAL, then we need to seal the memtable, add it to the
         // immutable memtable list and create a new active memtable.
-        if (!cfd->mem()->IsEmpty() && (curr_log_num == port::kMaxUint64 ||
-                                       curr_log_num != log_number)) {
+        if (!cfd->mem()->IsEmpty() &&
+            (curr_log_num == std::numeric_limits<uint64_t>::max() ||
+             curr_log_num != log_number)) {
           const MutableCFOptions mutable_cf_options =
               *cfd->GetLatestMutableCFOptions();
           MemTable* new_mem =
@@ -710,8 +711,11 @@ Status DB::OpenAsSecondary(
 }

 Status DBImplSecondary::CompactWithoutInstallation(
-    ColumnFamilyHandle* cfh, const CompactionServiceInput& input,
-    CompactionServiceResult* result) {
+    const OpenAndCompactOptions& options, ColumnFamilyHandle* cfh,
+    const CompactionServiceInput& input, CompactionServiceResult* result) {
+  if (options.canceled && options.canceled->load(std::memory_order_acquire)) {
+    return Status::Incomplete(Status::SubCode::kManualCompactionPaused);
+  }
   InstrumentedMutexLock l(&mutex_);
   auto cfd = static_cast_with_check<ColumnFamilyHandleImpl>(cfh)->cfd();
   if (!cfd) {
@@ -773,7 +777,7 @@ Status DBImplSecondary::CompactWithoutInstallation(
       file_options_for_compaction_, versions_.get(), &shutting_down_,
       &log_buffer, output_dir.get(), stats_, &mutex_, &error_handler_,
       input.snapshots, table_cache_, &event_logger_, dbname_, io_tracer_,
-      db_id_, db_session_id_, secondary_path_, input, result);
+      options.canceled, db_id_, db_session_id_, secondary_path_, input, result);

   mutex_.Unlock();
   s = compaction_job.Run();
@@ -792,9 +796,13 @@ Status DBImplSecondary::CompactWithoutInstallation(
 }

 Status DB::OpenAndCompact(
-    const std::string& name, const std::string& output_directory,
-    const std::string& input, std::string* result,
+    const OpenAndCompactOptions& options, const std::string& name,
+    const std::string& output_directory, const std::string& input,
+    std::string* output,
     const CompactionServiceOptionsOverride& override_options) {
+  if (options.canceled && options.canceled->load(std::memory_order_acquire)) {
+    return Status::Incomplete(Status::SubCode::kManualCompactionPaused);
+  }
   CompactionServiceInput compaction_input;
   Status s = CompactionServiceInput::Read(input, &compaction_input);
   if (!s.ok()) {
@@ -824,6 +832,7 @@ Status DB::OpenAndCompact(
       override_options.table_factory;
   compaction_input.column_family.options.sst_partitioner_factory =
       override_options.sst_partitioner_factory;
+  compaction_input.db_options.listeners = override_options.listeners;

   std::vector<ColumnFamilyDescriptor> column_families;
   column_families.push_back(compaction_input.column_family);
@@ -847,10 +856,10 @@ Status DB::OpenAndCompact(
   CompactionServiceResult compaction_result;
   DBImplSecondary* db_secondary = static_cast_with_check<DBImplSecondary>(db);
   assert(handles.size() > 0);
-  s = db_secondary->CompactWithoutInstallation(handles[0], compaction_input,
-                                               &compaction_result);
+  s = db_secondary->CompactWithoutInstallation(
+      options, handles[0], compaction_input, &compaction_result);

-  Status serialization_status = compaction_result.Write(result);
+  Status serialization_status = compaction_result.Write(output);

   for (auto& handle : handles) {
     delete handle;
@@ -862,6 +871,14 @@ Status DB::OpenAndCompact(
   return s;
 }

+Status DB::OpenAndCompact(
+    const std::string& name, const std::string& output_directory,
+    const std::string& input, std::string* output,
+    const CompactionServiceOptionsOverride& override_options) {
+  return OpenAndCompact(OpenAndCompactOptions(), name, output_directory, input,
+                        output, override_options);
+}
+
 #else  // !ROCKSDB_LITE

 Status DB::OpenAsSecondary(const Options& /*options*/,
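Both remote-compaction entry points now take OpenAndCompactOptions, whose cancellation token is checked up front and also forwarded (options.canceled) into the compaction job itself, so a host process can abort a work item mid-run; the old four-argument OpenAndCompact remains as a shim. A caller-side sketch — paths and serialized payloads are placeholders:

    #include <atomic>
    #include <string>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    // Sketch: execute one compaction-service work item with a cancellation
    // flag the host can flip from another thread.
    rocksdb::Status RunWorkItem(const std::string& db_name,
                                const std::string& output_dir,
                                const std::string& serialized_input,
                                std::string* serialized_result,
                                std::atomic<bool>* cancel_flag) {
      rocksdb::OpenAndCompactOptions options;
      options.canceled = cancel_flag;  // loaded with memory_order_acquire
      rocksdb::CompactionServiceOptionsOverride overrides;
      // ... populate overrides (env, table_factory, listeners, ...) ...
      return rocksdb::DB::OpenAndCompact(options, db_name, output_dir,
                                         serialized_input, serialized_result,
                                         overrides);
    }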
@ -228,10 +228,11 @@ class DBImplSecondary : public DBImpl {
|
|||||||
Status CheckConsistency() override;
|
Status CheckConsistency() override;
|
||||||
|
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
Status TEST_CompactWithoutInstallation(ColumnFamilyHandle* cfh,
|
Status TEST_CompactWithoutInstallation(const OpenAndCompactOptions& options,
|
||||||
|
ColumnFamilyHandle* cfh,
|
||||||
const CompactionServiceInput& input,
|
const CompactionServiceInput& input,
|
||||||
CompactionServiceResult* result) {
|
CompactionServiceResult* result) {
|
||||||
return CompactWithoutInstallation(cfh, input, result);
|
return CompactWithoutInstallation(options, cfh, input, result);
|
||||||
}
|
}
|
||||||
#endif // NDEBUG
|
#endif // NDEBUG
|
||||||
|
|
||||||
@ -346,7 +347,8 @@ class DBImplSecondary : public DBImpl {
|
|||||||
// Run compaction without installation, the output files will be placed in the
|
// Run compaction without installation, the output files will be placed in the
|
||||||
// secondary DB path. The LSM tree won't be changed, the secondary DB is still
|
// secondary DB path. The LSM tree won't be changed, the secondary DB is still
|
||||||
// in read-only mode.
|
// in read-only mode.
|
||||||
Status CompactWithoutInstallation(ColumnFamilyHandle* cfh,
|
Status CompactWithoutInstallation(const OpenAndCompactOptions& options,
|
||||||
|
ColumnFamilyHandle* cfh,
|
||||||
const CompactionServiceInput& input,
|
const CompactionServiceInput& input,
|
||||||
CompactionServiceResult* result);
|
CompactionServiceResult* result);
|
||||||
|
|
||||||
|
@ -35,10 +35,12 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
|
|||||||
Header(options.info_log, "DB SUMMARY\n");
|
Header(options.info_log, "DB SUMMARY\n");
|
||||||
Header(options.info_log, "DB Session ID: %s\n", session_id.c_str());
|
Header(options.info_log, "DB Session ID: %s\n", session_id.c_str());
|
||||||
|
|
||||||
|
Status s;
|
||||||
// Get files in dbname dir
|
// Get files in dbname dir
|
||||||
if (!env->GetChildren(dbname, &files).ok()) {
|
s = env->GetChildren(dbname, &files);
|
||||||
Error(options.info_log,
|
if (!s.ok()) {
|
||||||
"Error when reading %s dir\n", dbname.c_str());
|
Error(options.info_log, "Error when reading %s dir %s\n", dbname.c_str(),
|
||||||
|
s.ToString().c_str());
|
||||||
}
|
}
|
||||||
std::sort(files.begin(), files.end());
|
std::sort(files.begin(), files.end());
|
||||||
for (const std::string& file : files) {
|
for (const std::string& file : files) {
|
||||||
@ -53,24 +55,27 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
|
|||||||
Header(options.info_log, "IDENTITY file: %s\n", file.c_str());
|
Header(options.info_log, "IDENTITY file: %s\n", file.c_str());
|
||||||
break;
|
break;
|
||||||
case kDescriptorFile:
|
case kDescriptorFile:
|
||||||
if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
|
s = env->GetFileSize(dbname + "/" + file, &file_size);
|
||||||
|
if (s.ok()) {
|
||||||
Header(options.info_log,
|
Header(options.info_log,
|
||||||
"MANIFEST file: %s size: %" PRIu64 " Bytes\n", file.c_str(),
|
"MANIFEST file: %s size: %" PRIu64 " Bytes\n", file.c_str(),
|
||||||
file_size);
|
file_size);
|
||||||
} else {
|
} else {
|
||||||
Error(options.info_log, "Error when reading MANIFEST file: %s/%s\n",
|
Error(options.info_log,
|
||||||
dbname.c_str(), file.c_str());
|
"Error when reading MANIFEST file: %s/%s %s\n", dbname.c_str(),
|
||||||
|
file.c_str(), s.ToString().c_str());
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case kWalFile:
|
case kWalFile:
|
||||||
if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
|
s = env->GetFileSize(dbname + "/" + file, &file_size);
|
||||||
|
if (s.ok()) {
|
||||||
wal_info.append(file)
|
wal_info.append(file)
|
||||||
.append(" size: ")
|
.append(" size: ")
|
||||||
.append(std::to_string(file_size))
|
.append(std::to_string(file_size))
|
||||||
.append(" ; ");
|
.append(" ; ");
|
||||||
} else {
|
} else {
|
||||||
Error(options.info_log, "Error when reading LOG file: %s/%s\n",
|
Error(options.info_log, "Error when reading LOG file: %s/%s %s\n",
|
||||||
dbname.c_str(), file.c_str());
|
dbname.c_str(), file.c_str(), s.ToString().c_str());
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case kTableFile:
|
case kTableFile:
|
||||||
@ -86,10 +91,10 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
|
|||||||
// Get sst files in db_path dir
|
// Get sst files in db_path dir
|
||||||
for (auto& db_path : options.db_paths) {
|
for (auto& db_path : options.db_paths) {
|
||||||
if (dbname.compare(db_path.path) != 0) {
|
if (dbname.compare(db_path.path) != 0) {
|
||||||
if (!env->GetChildren(db_path.path, &files).ok()) {
|
s = env->GetChildren(db_path.path, &files);
|
||||||
Error(options.info_log,
|
if (!s.ok()) {
|
||||||
"Error when reading %s dir\n",
|
Error(options.info_log, "Error when reading %s dir %s\n",
|
||||||
db_path.path.c_str());
|
db_path.path.c_str(), s.ToString().c_str());
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
std::sort(files.begin(), files.end());
|
std::sort(files.begin(), files.end());
|
||||||
@ -111,22 +116,25 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
|
|||||||
// Get wal file in wal_dir
|
// Get wal file in wal_dir
|
||||||
const auto& wal_dir = options.GetWalDir(dbname);
|
const auto& wal_dir = options.GetWalDir(dbname);
|
||||||
if (!options.IsWalDirSameAsDBPath(dbname)) {
|
if (!options.IsWalDirSameAsDBPath(dbname)) {
|
||||||
if (!env->GetChildren(wal_dir, &files).ok()) {
|
s = env->GetChildren(wal_dir, &files);
|
||||||
Error(options.info_log, "Error when reading %s dir\n", wal_dir.c_str());
|
if (!s.ok()) {
|
||||||
|
Error(options.info_log, "Error when reading %s dir %s\n", wal_dir.c_str(),
|
||||||
|
s.ToString().c_str());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
wal_info.clear();
|
wal_info.clear();
|
||||||
for (const std::string& file : files) {
|
for (const std::string& file : files) {
|
||||||
if (ParseFileName(file, &number, &type)) {
|
if (ParseFileName(file, &number, &type)) {
|
||||||
if (type == kWalFile) {
|
if (type == kWalFile) {
|
||||||
if (env->GetFileSize(wal_dir + "/" + file, &file_size).ok()) {
|
s = env->GetFileSize(wal_dir + "/" + file, &file_size);
|
||||||
|
if (s.ok()) {
|
||||||
wal_info.append(file)
|
wal_info.append(file)
|
||||||
.append(" size: ")
|
.append(" size: ")
|
||||||
.append(std::to_string(file_size))
|
.append(std::to_string(file_size))
|
||||||
.append(" ; ");
|
.append(" ; ");
|
||||||
} else {
|
} else {
|
||||||
Error(options.info_log, "Error when reading LOG file %s/%s\n",
|
Error(options.info_log, "Error when reading LOG file %s/%s %s\n",
|
||||||
wal_dir.c_str(), file.c_str());
|
wal_dir.c_str(), file.c_str(), s.ToString().c_str());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -78,7 +78,6 @@ DBIter::DBIter(Env* _env, const ReadOptions& read_options,
       range_del_agg_(&ioptions.internal_comparator, s),
       db_impl_(db_impl),
       cfd_(cfd),
-      start_seqnum_(0ULL),
       timestamp_ub_(read_options.timestamp),
       timestamp_lb_(read_options.iter_start_ts),
       timestamp_size_(timestamp_ub_ ? timestamp_ub_->size() : 0) {
@@ -328,25 +327,7 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key,
      case kTypeSingleDeletion:
        // Arrange to skip all upcoming entries for this key since
        // they are hidden by this deletion.
-        // if iterartor specified start_seqnum we
-        // 1) return internal key, including the type
-        // 2) return ikey only if ikey.seqnum >= start_seqnum_
-        // note that if deletion seqnum is < start_seqnum_ we
-        // just skip it like in normal iterator.
-        if (start_seqnum_ > 0) {
-          if (ikey_.sequence >= start_seqnum_) {
-            saved_key_.SetInternalKey(ikey_);
-            valid_ = true;
-            return true;
-          } else {
-            saved_key_.SetUserKey(
-                ikey_.user_key,
-                !pin_thru_lifetime_ ||
-                    !iter_.iter()->IsKeyPinned() /* copy */);
-            skipping_saved_key = true;
-            PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-          }
-        } else if (timestamp_lb_) {
+        if (timestamp_lb_) {
          saved_key_.SetInternalKey(ikey_);
          valid_ = true;
          return true;
@@ -360,28 +341,7 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key,
        break;
      case kTypeValue:
      case kTypeBlobIndex:
-        if (start_seqnum_ > 0) {
-          if (ikey_.sequence >= start_seqnum_) {
-            saved_key_.SetInternalKey(ikey_);
-
-            if (ikey_.type == kTypeBlobIndex) {
-              if (!SetBlobValueIfNeeded(ikey_.user_key, iter_.value())) {
-                return false;
-              }
-            }
-
-            valid_ = true;
-            return true;
-          } else {
-            // this key and all previous versions shouldn't be included,
-            // skipping_saved_key
-            saved_key_.SetUserKey(
-                ikey_.user_key,
-                !pin_thru_lifetime_ ||
-                    !iter_.iter()->IsKeyPinned() /* copy */);
-            skipping_saved_key = true;
-          }
-        } else if (timestamp_lb_) {
+        if (timestamp_lb_) {
          saved_key_.SetInternalKey(ikey_);

          if (ikey_.type == kTypeBlobIndex) {
@@ -151,7 +151,7 @@ class DBIter final : public Iterator {
  }
  Slice key() const override {
    assert(valid_);
-    if (start_seqnum_ > 0 || timestamp_lb_) {
+    if (timestamp_lb_) {
      return saved_key_.GetInternalKey();
    } else {
      const Slice ukey_and_ts = saved_key_.GetUserKey();
@@ -371,9 +371,6 @@ class DBIter final : public Iterator {
  ROCKSDB_FIELD_UNUSED
#endif
  ColumnFamilyData* cfd_;
-  // for diff snapshots we want the lower bound on the seqnum;
-  // if this value > 0 iterator will return internal keys
-  SequenceNumber start_seqnum_;
  const Slice* const timestamp_ub_;
  const Slice* const timestamp_lb_;
  const size_t timestamp_size_;
@@ -414,7 +414,7 @@ TEST_F(DBIteratorStressTest, StressTest) {
      a /= 10;
      ++len;
    }
-    std::string s = ToString(rnd.Next() % static_cast<uint64_t>(max_key));
+    std::string s = std::to_string(rnd.Next() % static_cast<uint64_t>(max_key));
    s.insert(0, len - (int)s.size(), '0');
    return s;
  };
@@ -444,12 +444,13 @@ TEST_F(DBIteratorStressTest, StressTest) {
    for (double mutation_probability : {0.01, 0.5}) {
      for (double target_hidden_fraction : {0.1, 0.5}) {
        std::string trace_str =
-            "entries: " + ToString(num_entries) +
-            ", key_space: " + ToString(key_space) +
-            ", error_probability: " + ToString(error_probability) +
-            ", mutation_probability: " + ToString(mutation_probability) +
+            "entries: " + std::to_string(num_entries) +
+            ", key_space: " + std::to_string(key_space) +
+            ", error_probability: " + std::to_string(error_probability) +
+            ", mutation_probability: " +
+            std::to_string(mutation_probability) +
            ", target_hidden_fraction: " +
-            ToString(target_hidden_fraction);
+            std::to_string(target_hidden_fraction);
        SCOPED_TRACE(trace_str);
        if (trace) {
          std::cout << trace_str << std::endl;
@@ -470,7 +471,7 @@ TEST_F(DBIteratorStressTest, StressTest) {
            types[rnd.Next() % (sizeof(types) / sizeof(types[0]))];
      }
      e.sequence = i;
-      e.value = "v" + ToString(i);
+      e.value = "v" + std::to_string(i);
      ParsedInternalKey internal_key(e.key, e.sequence, e.type);
      AppendInternalKey(&e.ikey, internal_key);

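Most hunks in these test files are a mechanical migration from the project-local `ToString()` helper to `std::to_string`. A small self-contained sketch of the replacement (the legacy helper's exact definition is not shown in this diff; on modern toolchains `std::to_string` covers the same integral and floating-point cases):

```cpp
#include <cassert>
#include <cstdint>
#include <string>

int main() {
  uint64_t file_number = 1234;
  size_t level = 3;
  // Previously: "key" + ToString(file_number); now the standard facility.
  std::string key = "key" + std::to_string(file_number);
  std::string prop = "level-" + std::to_string(level);
  assert(key == "key1234");
  assert(prop == "level-3");
  return 0;
}
```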
@@ -766,7 +766,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
    internal_iter->AddMerge("b", "merge_1");
    internal_iter->AddMerge("a", "merge_2");
    for (size_t k = 0; k < 200; ++k) {
-      internal_iter->AddPut("c", ToString(k));
+      internal_iter->AddPut("c", std::to_string(k));
    }
    internal_iter->Finish();

@@ -780,7 +780,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
      ASSERT_TRUE(db_iter->Valid());

      ASSERT_EQ(db_iter->key().ToString(), "c");
-      ASSERT_EQ(db_iter->value().ToString(), ToString(i));
+      ASSERT_EQ(db_iter->value().ToString(), std::to_string(i));
      db_iter->Prev();
      ASSERT_TRUE(db_iter->Valid());

@@ -925,11 +925,11 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
    internal_iter->AddMerge("b", "merge_1");
    internal_iter->AddMerge("a", "merge_2");
    for (size_t k = 0; k < 200; ++k) {
-      internal_iter->AddPut("d", ToString(k));
+      internal_iter->AddPut("d", std::to_string(k));
    }

    for (size_t k = 0; k < 200; ++k) {
-      internal_iter->AddPut("c", ToString(k));
+      internal_iter->AddPut("c", std::to_string(k));
    }
    internal_iter->Finish();

@@ -942,7 +942,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
      ASSERT_TRUE(db_iter->Valid());

      ASSERT_EQ(db_iter->key().ToString(), "d");
-      ASSERT_EQ(db_iter->value().ToString(), ToString(i));
+      ASSERT_EQ(db_iter->value().ToString(), std::to_string(i));
      db_iter->Prev();
      ASSERT_TRUE(db_iter->Valid());

@@ -966,7 +966,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
    internal_iter->AddMerge("b", "b");
    internal_iter->AddMerge("a", "a");
    for (size_t k = 0; k < 200; ++k) {
-      internal_iter->AddMerge("c", ToString(k));
+      internal_iter->AddMerge("c", std::to_string(k));
    }
    internal_iter->Finish();

@@ -981,7 +981,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
      ASSERT_EQ(db_iter->key().ToString(), "c");
      std::string merge_result = "0";
      for (size_t j = 1; j <= i; ++j) {
-        merge_result += "," + ToString(j);
+        merge_result += "," + std::to_string(j);
      }
      ASSERT_EQ(db_iter->value().ToString(), merge_result);

@@ -3156,7 +3156,7 @@ TEST_F(DBIteratorTest, ReverseToForwardWithDisappearingKeys) {
  internal_iter->AddPut("a", "A");
  internal_iter->AddPut("b", "B");
  for (int i = 0; i < 100; ++i) {
-    internal_iter->AddPut("c" + ToString(i), "");
+    internal_iter->AddPut("c" + std::to_string(i), "");
  }
  internal_iter->Finish();

@@ -111,9 +111,12 @@ TEST_P(DBIteratorTest, PersistedTierOnIterator) {
TEST_P(DBIteratorTest, NonBlockingIteration) {
  do {
    ReadOptions non_blocking_opts, regular_opts;
-    Options options = CurrentOptions();
+    anon::OptionsOverride options_override;
+    options_override.full_block_cache = true;
+    Options options = CurrentOptions(options_override);
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    non_blocking_opts.read_tier = kBlockCacheTier;

    CreateAndReopenWithCF({"pikachu"}, options);
    // write one kv to the database.
    ASSERT_OK(Put(1, "a", "b"));
@@ -3157,7 +3160,7 @@ TEST_F(DBIteratorWithReadCallbackTest, ReadCallback) {
  uint64_t num_versions =
      CurrentOptions().max_sequential_skip_in_iterations + 10;
  for (uint64_t i = 0; i < num_versions; i++) {
-    ASSERT_OK(Put("bar", ToString(i)));
+    ASSERT_OK(Put("bar", std::to_string(i)));
  }
  SequenceNumber seq3 = db_->GetLatestSequenceNumber();
  TestReadCallback callback2(seq3);
@@ -3186,7 +3189,7 @@ TEST_F(DBIteratorWithReadCallbackTest, ReadCallback) {
  ASSERT_TRUE(iter->Valid());
  ASSERT_OK(iter->status());
  ASSERT_EQ("bar", iter->key());
-  ASSERT_EQ(ToString(num_versions - 1), iter->value());
+  ASSERT_EQ(std::to_string(num_versions - 1), iter->value());

  delete iter;
}
@@ -3,6 +3,7 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

+#include "db/blob/blob_index.h"
#include "db/db_test_util.h"
#include "rocksdb/rocksdb_namespace.h"

@@ -54,7 +55,7 @@ class DbKvChecksumTest
    case WriteBatchOpType::kMerge:
      s = wb.Merge(cf_handle, "key", "val");
      break;
-    case WriteBatchOpType::kBlobIndex:
+    case WriteBatchOpType::kBlobIndex: {
      // TODO(ajkr): use public API once available.
      uint32_t cf_id;
      if (cf_handle == nullptr) {
@@ -62,8 +63,14 @@ class DbKvChecksumTest
      } else {
        cf_id = cf_handle->GetID();
      }
-      s = WriteBatchInternal::PutBlobIndex(&wb, cf_id, "key", "val");
+
+      std::string blob_index;
+      BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 9876543210,
+                                  "val");
+
+      s = WriteBatchInternal::PutBlobIndex(&wb, cf_id, "key", blob_index);
      break;
+    }
    case WriteBatchOpType::kNum:
      assert(false);
  }
@@ -72,7 +79,7 @@ class DbKvChecksumTest

  void CorruptNextByteCallBack(void* arg) {
    Slice encoded = *static_cast<Slice*>(arg);
-    if (entry_len_ == port::kMaxSizet) {
+    if (entry_len_ == std::numeric_limits<size_t>::max()) {
      // We learn the entry size on the first attempt
      entry_len_ = encoded.size();
    }
@@ -89,7 +96,7 @@ class DbKvChecksumTest
  WriteBatchOpType op_type_;
  char corrupt_byte_addend_;
  size_t corrupt_byte_offset_ = 0;
-  size_t entry_len_ = port::kMaxSizet;
+  size_t entry_len_ = std::numeric_limits<size_t>::max();
};

std::string GetTestNameSuffix(
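The same sweep retires project-local portability constants such as `port::kMaxSizet` (and, per the comment change in the next file, `port::kMaxUint32`; later hunks do the same for `port::kMaxUint64`) in favor of `std::numeric_limits`. A minimal sketch of the sentinel idiom used by `entry_len_` above:

```cpp
#include <cstddef>
#include <limits>

// "Not learned yet" sentinel, formerly spelled port::kMaxSizet.
constexpr size_t kUnknownEntryLen = std::numeric_limits<size_t>::max();

size_t entry_len = kUnknownEntryLen;

bool EntryLenKnown() { return entry_len != kUnknownEntryLen; }

int main() {
  if (!EntryLenKnown()) {
    entry_len = 42;  // learn the size on the first attempt
  }
  return EntryLenKnown() ? 0 : 1;
}
```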
@@ -187,7 +187,7 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
  DestroyAndReopen(options);

  for (int i = 0; i < 1024; i++) {
-    ASSERT_OK(Put("key" + ToString(i), DummyString(10)));
+    ASSERT_OK(Put("key" + std::to_string(i), DummyString(10)));
  }

  ASSERT_OK(Flush());
@@ -263,20 +263,20 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
  struct Handler : public WriteBatch::Handler {
    std::string seen;
    Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override {
-      seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " +
-              ToString(value.size()) + ")";
+      seen += "Put(" + std::to_string(cf) + ", " + key.ToString() + ", " +
+              std::to_string(value.size()) + ")";
      return Status::OK();
    }
    Status MergeCF(uint32_t cf, const Slice& key, const Slice& value) override {
-      seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " +
-              ToString(value.size()) + ")";
+      seen += "Merge(" + std::to_string(cf) + ", " + key.ToString() + ", " +
+              std::to_string(value.size()) + ")";
      return Status::OK();
    }
    void LogData(const Slice& blob) override {
      seen += "LogData(" + blob.ToString() + ")";
    }
    Status DeleteCF(uint32_t cf, const Slice& key) override {
-      seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")";
+      seen += "Delete(" + std::to_string(cf) + ", " + key.ToString() + ")";
      return Status::OK();
    }
  } handler;
@@ -97,7 +97,7 @@ class MockMemTableRepFactory : public MemTableRepFactory {

 private:
  MockMemTableRep* mock_rep_;
-  // workaround since there's no port::kMaxUint32 yet.
+  // workaround since there's no std::numeric_limits<uint32_t>::max() yet.
  uint32_t last_column_family_id_ = static_cast<uint32_t>(-1);
};

@@ -171,7 +171,7 @@ TEST_F(DBMemTableTest, DuplicateSeq) {
    if (!insert_dup) {
      seq++;
    }
-    Status s = mem->Add(seq, kTypeValue, "foo", "value" + ToString(seq),
+    Status s = mem->Add(seq, kTypeValue, "foo", "value" + std::to_string(seq),
                        nullptr /* kv_prot_info */);
    if (insert_dup) {
      ASSERT_TRUE(s.IsTryAgain());
@@ -424,8 +424,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  int i = 0;
  for (; i < 3; i++) {
-    ASSERT_OK(Put("foo", ToString(i)));
-    ASSERT_OK(Put("bar", ToString(i)));
+    ASSERT_OK(Put("foo", std::to_string(i)));
+    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -442,8 +442,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
            dbfull()->GetDBOptions().writable_file_max_buffer_size);
  i = 0;
  for (; i < 3; i++) {
-    ASSERT_OK(Put("foo", ToString(i)));
-    ASSERT_OK(Put("bar", ToString(i)));
+    ASSERT_OK(Put("foo", std::to_string(i)));
+    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -573,8 +573,8 @@ TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
  Reopen(options);
  for (int i = 0; i < 3; i++) {
    // Need to insert two keys to avoid trivial move.
-    ASSERT_OK(Put("foo", ToString(i)));
-    ASSERT_OK(Put("bar", ToString(i)));
+    ASSERT_OK(Put("foo", std::to_string(i)));
+    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_EQ("3", FilesPerLevel());
@@ -717,8 +717,8 @@ TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) {

  for (int i = 0; i < 20; i++) {
    unsigned int num = rand() % 5000 + 1;
-    ASSERT_OK(
-        dbfull()->SetDBOptions({{"stats_dump_period_sec", ToString(num)}}));
+    ASSERT_OK(dbfull()->SetDBOptions(
+        {{"stats_dump_period_sec", std::to_string(num)}}));
    ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
  }
  Close();
@@ -909,7 +909,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+      ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
    }
    ASSERT_OK(Flush());
  }
@@ -940,7 +940,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+      ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
    }
    ASSERT_OK(Flush());
  }
@@ -972,7 +972,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+      ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
    }
    ASSERT_OK(Flush());
  }
@@ -1036,7 +1036,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+      ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
    }
    ASSERT_OK(Flush());
  }
@@ -593,9 +593,9 @@ TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
    ResetTableProperties(&sum_tp);
    for (int level = 0; level < kMaxLevel; ++level) {
-      db_->GetProperty(
-          DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level),
+      db_->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel +
+                           std::to_string(level),
          &level_tp_strings[level]);
      ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
      sum_tp.data_size += level_tps[level].data_size;
      sum_tp.index_size += level_tps[level].index_size;
@@ -1091,7 +1091,7 @@ TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
    for (int j = 0; j < kNumEntriesPerFile; ++j) {
      // Put common data ("key") at end to prevent delta encoding from
      // compressing the key effectively
-      std::string key = ToString(i) + ToString(j) + "key";
+      std::string key = std::to_string(i) + std::to_string(j) + "key";
      ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
    }
    ASSERT_OK(Flush());
@@ -1185,7 +1185,7 @@ class CountingDeleteTabPropCollector : public TablePropertiesCollector {

  Status Finish(UserCollectedProperties* properties) override {
    *properties =
-        UserCollectedProperties{{"num_delete", ToString(num_deletes_)}};
+        UserCollectedProperties{{"num_delete", std::to_string(num_deletes_)}};
    return Status::OK();
  }

@@ -1215,7 +1215,7 @@ class BlockCountingTablePropertiesCollector : public TablePropertiesCollector {

  Status Finish(UserCollectedProperties* properties) override {
    (*properties)[kNumSampledBlocksPropertyName] =
-        ToString(num_sampled_blocks_);
+        std::to_string(num_sampled_blocks_);
    return Status::OK();
  }

@@ -1235,7 +1235,7 @@ class BlockCountingTablePropertiesCollector : public TablePropertiesCollector {

  UserCollectedProperties GetReadableProperties() const override {
    return UserCollectedProperties{
-        {kNumSampledBlocksPropertyName, ToString(num_sampled_blocks_)},
+        {kNumSampledBlocksPropertyName, std::to_string(num_sampled_blocks_)},
    };
  }

@@ -1272,7 +1272,8 @@ TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
  // Create 4 tables
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val"));
+      ASSERT_OK(
+          db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(db_->Flush(FlushOptions()));
  }
@@ -1312,7 +1313,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  // Create 2 files
  for (int table = 0; table < 2; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(1, ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush(1));
  }
@@ -1322,7 +1323,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  // Trigger automatic compactions.
  for (int table = 0; table < 3; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(1, ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush(1));
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -1339,7 +1340,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  // Create 4 tables in default column family
  for (int table = 0; table < 2; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush());
  }
@@ -1349,7 +1350,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  // Trigger automatic compactions.
  for (int table = 0; table < 3; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -1545,7 +1546,7 @@ TEST_F(DBPropertiesTest, BlockAddForCompressionSampling) {
              user_props.end());
    ASSERT_EQ(user_props.at(BlockCountingTablePropertiesCollector::
                                kNumSampledBlocksPropertyName),
-              ToString(sample_for_compression ? 1 : 0));
+              std::to_string(sample_for_compression ? 1 : 0));
    }
  }
}
@@ -1742,11 +1743,11 @@ TEST_F(DBPropertiesTest, SstFilesSize) {
  Reopen(options);

  for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put("key" + ToString(i), std::string(1000, 'v')));
+    ASSERT_OK(Put("key" + std::to_string(i), std::string(1000, 'v')));
  }
  ASSERT_OK(Flush());
  for (int i = 0; i < 5; i++) {
-    ASSERT_OK(Delete("key" + ToString(i)));
+    ASSERT_OK(Delete("key" + std::to_string(i)));
  }
  ASSERT_OK(Flush());
  uint64_t sst_size;
@@ -1997,6 +1998,37 @@ TEST_F(DBPropertiesTest, GetMapPropertyDbStats) {
  Close();
}

+TEST_F(DBPropertiesTest, GetMapPropertyBlockCacheEntryStats) {
+  // Currently only verifies the expected properties are present
+  std::map<std::string, std::string> values;
+  ASSERT_TRUE(
+      db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
+
+  ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheId()) !=
+              values.end());
+  ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheCapacityBytes()) !=
+              values.end());
+  ASSERT_TRUE(
+      values.find(
+          BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()) !=
+      values.end());
+  ASSERT_TRUE(
+      values.find(BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()) !=
+      values.end());
+  for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
+    CacheEntryRole role = static_cast<CacheEntryRole>(i);
+    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::EntryCount(role)) !=
+                values.end());
+    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedBytes(role)) !=
+                values.end());
+    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedPercent(role)) !=
+                values.end());
+  }
+
+  // There should be no extra values in the map.
+  ASSERT_EQ(3 * kNumCacheEntryRoles + 4, values.size());
+}
+
namespace {
std::string PopMetaIndexKey(InternalIterator* meta_iter) {
  Status s = meta_iter->status();
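The new `GetMapPropertyBlockCacheEntryStats` test above exercises the map-valued property `DB::Properties::kBlockCacheEntryStats` together with the `BlockCacheEntryStatsMapKeys` helpers. A hedged usage sketch against an already-open `rocksdb::DB*`; it assumes the header locations and link setup of this tree, and only uses the names the test itself exercises:

```cpp
#include <cstdio>
#include <map>
#include <string>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"

// Prints the block cache id and capacity from the entry-stats map.
void PrintBlockCacheStats(rocksdb::DB* db) {
  std::map<std::string, std::string> values;
  if (!db->GetMapProperty(rocksdb::DB::Properties::kBlockCacheEntryStats,
                          &values)) {
    return;  // property unavailable (e.g. no block cache configured)
  }
  auto id = values.find(rocksdb::BlockCacheEntryStatsMapKeys::CacheId());
  auto cap =
      values.find(rocksdb::BlockCacheEntryStatsMapKeys::CacheCapacityBytes());
  if (id != values.end() && cap != values.end()) {
    std::printf("cache %s capacity %s bytes\n", id->second.c_str(),
                cap->second.c_str());
  }
}
```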
@@ -190,9 +190,10 @@ TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) {
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(NumTableFilesAtLevel(2), 2);

-  ASSERT_OK(db_->SetOptions(
-      db_->DefaultColumnFamily(),
-      {{"target_file_size_base", ToString(100 * opts.max_compaction_bytes)}}));
+  ASSERT_OK(
+      db_->SetOptions(db_->DefaultColumnFamily(),
+                      {{"target_file_size_base",
+                        std::to_string(100 * opts.max_compaction_bytes)}}));

  // It spans the whole key-range, thus will be included in all output files
  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
@@ -500,7 +501,8 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
      1 /* input_level */, 2 /* output_level */, CompactRangeOptions(),
      nullptr /* begin */, nullptr /* end */, true /* exclusive */,
      true /* disallow_trivial_move */,
-      port::kMaxUint64 /* max_file_num_to_ignore */, "" /*trim_ts*/));
+      std::numeric_limits<uint64_t>::max() /* max_file_num_to_ignore */,
+      "" /*trim_ts*/));
}
#endif  // ROCKSDB_LITE

@@ -188,8 +188,8 @@ TEST_F(DBSecondaryTest, SimpleInternalCompaction) {
  auto cfh = db_secondary_->DefaultColumnFamily();

  CompactionServiceResult result;
-  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input,
-                                                                 &result));
+  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input, &result));

  ASSERT_EQ(result.output_files.size(), 1);
  InternalKey smallest, largest;
@@ -212,20 +212,20 @@ TEST_F(DBSecondaryTest, InternalCompactionMultiLevels) {
  const int kRangeL2 = 10;
  const int kRangeL1 = 30;
  for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put(Key(i * kRangeL2), "value" + ToString(i)));
-    ASSERT_OK(Put(Key((i + 1) * kRangeL2 - 1), "value" + ToString(i)));
+    ASSERT_OK(Put(Key(i * kRangeL2), "value" + std::to_string(i)));
+    ASSERT_OK(Put(Key((i + 1) * kRangeL2 - 1), "value" + std::to_string(i)));
    ASSERT_OK(Flush());
  }
  MoveFilesToLevel(2);
  for (int i = 0; i < 5; i++) {
-    ASSERT_OK(Put(Key(i * kRangeL1), "value" + ToString(i)));
-    ASSERT_OK(Put(Key((i + 1) * kRangeL1 - 1), "value" + ToString(i)));
+    ASSERT_OK(Put(Key(i * kRangeL1), "value" + std::to_string(i)));
+    ASSERT_OK(Put(Key((i + 1) * kRangeL1 - 1), "value" + std::to_string(i)));
    ASSERT_OK(Flush());
  }
  MoveFilesToLevel(1);
  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put(Key(i * 30), "value" + ToString(i)));
-    ASSERT_OK(Put(Key(i * 30 + 50), "value" + ToString(i)));
+    ASSERT_OK(Put(Key(i * 30), "value" + std::to_string(i)));
+    ASSERT_OK(Put(Key(i * 30 + 50), "value" + std::to_string(i)));
    ASSERT_OK(Flush());
  }

@@ -248,8 +248,8 @@ TEST_F(DBSecondaryTest, InternalCompactionMultiLevels) {
  OpenSecondary(options);
  auto cfh = db_secondary_->DefaultColumnFamily();
  CompactionServiceResult result;
-  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input1,
-                                                                 &result));
+  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input1, &result));
  ASSERT_OK(result.status);

  // pick 2 files on level 1 for compaction, which has 6 overlap files on L2
@@ -261,8 +261,8 @@ TEST_F(DBSecondaryTest, InternalCompactionMultiLevels) {
  }

  input2.output_level = 2;
-  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input2,
-                                                                 &result));
+  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input2, &result));
  ASSERT_OK(result.status);

  CloseSecondary();
@@ -273,15 +273,15 @@ TEST_F(DBSecondaryTest, InternalCompactionMultiLevels) {
  }
  OpenSecondary(options);
  cfh = db_secondary_->DefaultColumnFamily();
-  Status s = db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input2,
-                                                                  &result);
+  Status s = db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input2, &result);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_OK(result.status);

  // TODO: L0 -> L1 compaction should success, currently version is not built
  // if files is missing.
-  // ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(cfh,
-  // input1, &result));
+  // ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(OpenAndCompactOptions(),
+  // cfh, input1, &result));
}

TEST_F(DBSecondaryTest, InternalCompactionCompactedFiles) {
@@ -319,8 +319,8 @@ TEST_F(DBSecondaryTest, InternalCompactionCompactedFiles) {
  auto cfh = db_secondary_->DefaultColumnFamily();

  CompactionServiceResult result;
-  Status s =
-      db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input, &result);
+  Status s = db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input, &result);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_OK(result.status);
}
@@ -356,15 +356,15 @@ TEST_F(DBSecondaryTest, InternalCompactionMissingFiles) {
  auto cfh = db_secondary_->DefaultColumnFamily();

  CompactionServiceResult result;
-  Status s =
-      db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input, &result);
+  Status s = db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input, &result);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_OK(result.status);

  input.input_files.erase(input.input_files.begin());

-  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(cfh, input,
-                                                                 &result));
+  ASSERT_OK(db_secondary_full()->TEST_CompactWithoutInstallation(
+      OpenAndCompactOptions(), cfh, input, &result));
  ASSERT_OK(result.status);
}

@@ -280,6 +280,58 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
  listener->VerifyMatchedCount(1);
}

+// Test that producing an empty .sst file does not write it out to
+// disk, and that the DeleteFile() env method is not called for
+// removing the non-existing file later.
+TEST_F(DBSSTTest, DeleteFileNotCalledForNotCreatedSSTFile) {
+  Options options = CurrentOptions();
+  options.env = env_;
+
+  OnFileDeletionListener* listener = new OnFileDeletionListener();
+  options.listeners.emplace_back(listener);
+
+  Reopen(options);
+
+  // Flush the empty database.
+  ASSERT_OK(Flush());
+  ASSERT_EQ("", FilesPerLevel(0));
+
+  // We expect no .sst files.
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 0U);
+
+  // We expect no file deletions.
+  listener->VerifyMatchedCount(0);
+}
+
+// Test that producing a non-empty .sst file does write it out to
+// disk, and that the DeleteFile() env method is not called for removing
+// the file later.
+TEST_F(DBSSTTest, DeleteFileNotCalledForCreatedSSTFile) {
+  Options options = CurrentOptions();
+  options.env = env_;
+
+  OnFileDeletionListener* listener = new OnFileDeletionListener();
+  options.listeners.emplace_back(listener);
+
+  Reopen(options);
+
+  ASSERT_OK(Put("pika", "choo"));
+
+  // Flush the non-empty database.
+  ASSERT_OK(Flush());
+  ASSERT_EQ("1", FilesPerLevel(0));
+
+  // We expect 1 .sst files.
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 1U);
+
+  // We expect no file deletions.
+  listener->VerifyMatchedCount(0);
+}
+
TEST_F(DBSSTTest, DBWithSstFileManager) {
  std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
  auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
@@ -947,7 +999,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {

  // Create 4 files in L0
  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A'), wo));
+    ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'A'), wo));
    ASSERT_OK(Flush());
  }
  // We created 4 sst files in L0
@@ -963,7 +1015,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {

  // Create 4 files in L0
  for (int i = 4; i < 8; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'B'), wo));
+    ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'B'), wo));
    ASSERT_OK(Flush());
  }
  ASSERT_EQ("4,1", FilesPerLevel(0));
@@ -1009,7 +1061,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {

  // Create 4 files in L0
  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A')));
+    ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'A')));
    ASSERT_OK(Flush());
  }
  // We created 4 sst files in L0
@@ -1478,7 +1530,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
  // Generate 5 files in L0
  for (int i = 0; i < 5; i++) {
    for (int j = 0; j < 10; j++) {
-      std::string val = "val_file_" + ToString(i);
+      std::string val = "val_file_" + std::to_string(i);
      ASSERT_OK(Put(Key(j), val));
    }
    ASSERT_OK(Flush());
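The two new `DeleteFileNotCalledFor*SSTFile` tests above rely on `GetLiveFilesMetaData()` to check whether a flush actually produced an SST file. A small helper sketch with the same shape, assuming an already-open `rocksdb::DB*`:

```cpp
#include <cstddef>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/metadata.h"

// Returns the number of live SST files; 0 after flushing an empty memtable,
// since RocksDB does not write out an empty .sst file.
size_t NumLiveSstFiles(rocksdb::DB* db) {
  std::vector<rocksdb::LiveFileMetaData> metadata;
  db->GetLiveFilesMetaData(&metadata);  // void return; fills the vector
  return metadata.size();
}
```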
@@ -84,7 +84,8 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
    }
    // Build file
    for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val"));
+      ASSERT_OK(
+          db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(db_->Flush(FlushOptions()));
  }
@@ -113,7 +114,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
  // fetch key from 1st and 2nd table, which will internally place that table to
  // the table cache.
  for (int i = 0; i < 2; ++i) {
-    Get(ToString(i * 100 + 0));
+    Get(std::to_string(i * 100 + 0));
  }

  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
@@ -122,7 +123,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
  Reopen(options);
  // fetch key from all tables, which will place them in table cache.
  for (int i = 0; i < 4; ++i) {
-    Get(ToString(i * 100 + 0));
+    Get(std::to_string(i * 100 + 0));
  }
  VerifyTableProperties(db_, 10 + 11 + 12 + 13);

@@ -156,7 +157,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
  } else {
    bool found_corruption = false;
    for (int i = 0; i < 4; ++i) {
-      std::string result = Get(ToString(i * 100 + 0));
+      std::string result = Get(std::to_string(i * 100 + 0));
      if (result.find_first_of("Corruption: block checksum mismatch") !=
          std::string::npos) {
        found_corruption = true;
@@ -187,7 +188,7 @@ TEST_F(DBTablePropertiesTest, InvalidIgnored) {

  // Build file
  for (int i = 0; i < 10; ++i) {
-    ASSERT_OK(db_->Put(WriteOptions(), ToString(i), "val"));
+    ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), "val"));
  }
  ASSERT_OK(db_->Flush(FlushOptions()));

@ -2427,8 +2427,8 @@ TEST_F(DBTest, SnapshotFiles) {
|
|||||||
|
|
||||||
// Also test GetLiveFilesStorageInfo
|
// Also test GetLiveFilesStorageInfo
|
||||||
std::vector<LiveFileStorageInfo> new_infos;
|
std::vector<LiveFileStorageInfo> new_infos;
|
||||||
ASSERT_OK(dbfull()->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(),
|
ASSERT_OK(db_->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(),
|
||||||
&new_infos));
|
&new_infos));
|
||||||
|
|
||||||
// Close DB (while deletions disabled)
|
// Close DB (while deletions disabled)
|
||||||
Close();
|
Close();
|
||||||
@ -2734,7 +2734,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
|
|||||||
Options options = CurrentOptions(options_override);
|
Options options = CurrentOptions(options_override);
|
||||||
std::vector<std::string> cfs;
|
std::vector<std::string> cfs;
|
||||||
for (int i = 1; i < kColumnFamilies; ++i) {
|
for (int i = 1; i < kColumnFamilies; ++i) {
|
||||||
cfs.push_back(ToString(i));
|
cfs.push_back(std::to_string(i));
|
||||||
}
|
}
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
CreateAndReopenWithCF(cfs, options);
|
CreateAndReopenWithCF(cfs, options);
|
||||||
@ -2786,7 +2786,7 @@ static void GCThreadBody(void* arg) {
|
|||||||
WriteOptions wo;
|
WriteOptions wo;
|
||||||
|
|
||||||
for (int i = 0; i < kGCNumKeys; ++i) {
|
for (int i = 0; i < kGCNumKeys; ++i) {
|
||||||
std::string kv(ToString(i + id * kGCNumKeys));
|
std::string kv(std::to_string(i + id * kGCNumKeys));
|
||||||
ASSERT_OK(db->Put(wo, kv, kv));
|
ASSERT_OK(db->Put(wo, kv, kv));
|
||||||
}
|
}
|
||||||
t->done = true;
|
t->done = true;
|
||||||
@ -2822,7 +2822,7 @@ TEST_F(DBTest, GroupCommitTest) {
|
|||||||
|
|
||||||
std::vector<std::string> expected_db;
|
std::vector<std::string> expected_db;
|
||||||
for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
|
for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
|
||||||
expected_db.push_back(ToString(i));
|
expected_db.push_back(std::to_string(i));
|
||||||
}
|
}
|
||||||
std::sort(expected_db.begin(), expected_db.end());
|
std::sort(expected_db.begin(), expected_db.end());
|
||||||
|
|
||||||
@ -3591,7 +3591,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
for (int j = 0; j < 110; ++j) {
|
for (int j = 0; j < 110; ++j) {
|
||||||
ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 100 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
// flush should happen here
|
// flush should happen here
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
|
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
|
||||||
@ -3607,7 +3607,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
|
|||||||
ASSERT_EQ(NumTableFilesAtLevel(0), 5);
|
ASSERT_EQ(NumTableFilesAtLevel(0), 5);
|
||||||
for (int i = 0; i < 50; ++i) {
|
for (int i = 0; i < 50; ++i) {
|
||||||
// these keys should be deleted in previous compaction
|
// these keys should be deleted in previous compaction
|
||||||
ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
|
ASSERT_EQ("NOT_FOUND", Get(std::to_string(i)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -3629,7 +3629,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3640,7 +3640,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j + 2000), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3670,27 +3670,27 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Each file contains a different key which will be dropped later.
|
// Each file contains a different key which will be dropped later.
|
||||||
ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
|
ASSERT_OK(Put("a" + std::to_string(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Put("key" + ToString(i), ""));
|
ASSERT_OK(Put("key" + std::to_string(i), ""));
|
||||||
ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
|
ASSERT_OK(Put("z" + std::to_string(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
}
|
}
|
||||||
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
|
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
ASSERT_EQ("", Get("key" + ToString(i)));
|
ASSERT_EQ("", Get("key" + std::to_string(i)));
|
||||||
}
|
}
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Each file contains a different key which will be dropped later.
|
// Each file contains a different key which will be dropped later.
|
||||||
ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
|
ASSERT_OK(Put("a" + std::to_string(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Delete("key" + ToString(i)));
|
ASSERT_OK(Delete("key" + std::to_string(i)));
|
||||||
ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
|
ASSERT_OK(Put("z" + std::to_string(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
}
|
}
|
||||||
ASSERT_EQ(NumTableFilesAtLevel(0), 2);
|
ASSERT_EQ(NumTableFilesAtLevel(0), 2);
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
|
ASSERT_EQ("NOT_FOUND", Get("key" + std::to_string(i)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3759,7 +3759,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3791,7 +3791,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3807,7 +3807,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
// Create 1 more file to trigger TTL compaction. The old files are dropped.
|
// Create 1 more file to trigger TTL compaction. The old files are dropped.
|
||||||
for (int i = 0; i < 1; i++) {
|
for (int i = 0; i < 1; i++) {
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -3833,7 +3833,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@@ -3848,7 +3848,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
 
     for (int i = 0; i < 5; i++) {
       for (int j = 0; j < 140; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+        ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
       }
       ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -3871,7 +3871,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
     for (int i = 0; i < 10; i++) {
       // Generate and flush a file about 10KB.
       for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+        ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
       }
       ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -3890,7 +3890,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
     // Create 10 more files. The old 5 files are dropped as their ttl expired.
     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+        ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
       }
       ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -3915,7 +3915,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
     for (int i = 0; i < 60; i++) {
       // Generate and flush a file about 20KB.
       for (int j = 0; j < 20; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
+        ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
       }
       ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -3926,7 +3926,8 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
     for (int i = 0; i < 60; i++) {
       // Generate and flush a file about 20KB.
       for (int j = 0; j < 20; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
+        ASSERT_OK(
+            Put(std::to_string(i * 20 + j + 2000), rnd.RandomString(980)));
       }
       ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
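These FIFOCompactionWithTTLTest hunks all follow the same write/flush/wait loop. For context, a minimal sketch of the FIFO-with-TTL configuration such a test depends on (option names are from the public RocksDB API; the values are illustrative, not taken from this diff):

  #include "rocksdb/options.h"

  rocksdb::Options MakeFifoTtlOptions() {
    rocksdb::Options options;
    // FIFO compaction keeps no levels; it simply drops the oldest SST files
    // once a size cap or the TTL is exceeded, which is what the tests above
    // wait for via TEST_WaitForCompact().
    options.compaction_style = rocksdb::kCompactionStyleFIFO;
    options.ttl = 60 * 60;  // seconds; files older than this become droppable
    options.compaction_options_fifo.max_table_files_size = 10 << 20;  // 10MB
    return options;
  }
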
@@ -4207,7 +4208,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
   std::vector<port::Thread> threads;
   threads.emplace_back([&] {
     for (size_t i = 0; i < cnt; i++) {
-      auto istr = ToString(i);
+      auto istr = std::to_string(i);
       ASSERT_OK(db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr,
                          "b" + istr));
     }
@@ -4215,7 +4216,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
   if (two_write_queues) {
     threads.emplace_back([&] {
       for (size_t i = cnt; i < 2 * cnt; i++) {
-        auto istr = ToString(i);
+        auto istr = std::to_string(i);
         WriteBatch batch;
         ASSERT_OK(batch.Put("a" + istr, "b" + istr));
         ASSERT_OK(
@@ -4236,7 +4237,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
   Reopen(options);
   for (size_t i = 0; i < cnt; i++) {
     PinnableSlice pval;
-    auto istr = ToString(i);
+    auto istr = std::to_string(i);
     ASSERT_OK(
         db_->Get(ropt, db_->DefaultColumnFamily(), "a" + istr, &pval));
     ASSERT_TRUE(pval == ("b" + istr));
@@ -4259,7 +4260,7 @@ TEST_F(DBTest, ManualFlushWalAndWriteRace) {
 
   port::Thread writeThread([&]() {
     for (int i = 0; i < 100; i++) {
-      auto istr = ToString(i);
+      auto istr = std::to_string(i);
       ASSERT_OK(dbfull()->Put(wopts, "key_" + istr, "value_" + istr));
     }
   });
@@ -4607,7 +4608,7 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
     // The Put Phase.
     for (int file = 0; file < kNumL0Files; ++file) {
       for (int key = 0; key < kEntriesPerBuffer; ++key) {
-        ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
+        ASSERT_OK(Put(std::to_string(key + file * kEntriesPerBuffer),
                       rnd.RandomString(kTestValueSize)));
       }
       ASSERT_OK(Flush());
@@ -4758,7 +4759,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
   int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
   for (int file = 0; file < 16 * kNumL0Files; ++file) {
     for (int k = 0; k < kEntriesPerBuffer; ++k) {
-      ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
+      ASSERT_OK(Put(std::to_string(key++), rnd.RandomString(kTestValueSize)));
     }
 
     ASSERT_OK(env_->GetThreadList(&thread_list));
@@ -4845,7 +4846,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
   int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
   for (int file = 0; file < 16 * kNumL0Files; ++file) {
     for (int k = 0; k < kEntriesPerBuffer; ++k) {
-      ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
+      ASSERT_OK(Put(std::to_string(key++), rnd.RandomString(kTestValueSize)));
     }
 
     ASSERT_OK(env_->GetThreadList(&thread_list));
@@ -5156,8 +5157,9 @@ TEST_F(DBTest, DynamicCompactionOptions) {
   // Writing to 64KB L0 files should trigger a compaction. Since these
   // 2 L0 files have the same key range, compaction merge them and should
   // result in 2 32KB L1 files.
-  ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
-                                  {"target_file_size_base", ToString(k32KB)}}));
+  ASSERT_OK(
+      dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
+                            {"target_file_size_base", std::to_string(k32KB)}}));
 
   gen_l0_kb(0, 64, 1);
   ASSERT_EQ("1,1", FilesPerLevel());
@@ -5176,8 +5178,8 @@ TEST_F(DBTest, DynamicCompactionOptions) {
   // Increase level base size to 256KB and write enough data that will
   // fill L1 and L2. L1 size should be around 256KB while L2 size should be
   // around 256KB x 4.
-  ASSERT_OK(
-      dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}}));
+  ASSERT_OK(dbfull()->SetOptions(
+      {{"max_bytes_for_level_base", std::to_string(k1MB)}}));
 
   // writing 96 x 64KB => 6 * 1024KB
   // (L1 + L2) = (1 + 4) * 1024KB
@@ -5196,9 +5198,9 @@ TEST_F(DBTest, DynamicCompactionOptions) {
   // max_bytes_for_level_base. Now, reduce both mulitplier and level base,
   // After filling enough data that can fit in L1 - L3, we should see L1 size
   // reduces to 128KB from 256KB which was asserted previously. Same for L2.
-  ASSERT_OK(
-      dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"},
-                            {"max_bytes_for_level_base", ToString(k128KB)}}));
+  ASSERT_OK(dbfull()->SetOptions(
+      {{"max_bytes_for_level_multiplier", "2"},
+       {"max_bytes_for_level_base", std::to_string(k128KB)}}));
 
   // writing 20 x 64KB = 10 x 128KB
   // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
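These DynamicCompactionOptions hunks show why std::to_string composes well here: DB::SetOptions() takes every mutable option as a string. A minimal sketch of the call pattern (assuming an open rocksdb::DB* db; the helper name is invented, and the constant mirrors the test's k32KB):

  #include <cassert>
  #include <string>
  #include "rocksdb/db.h"

  void LowerTargetFileSize(rocksdb::DB* db) {
    const uint64_t k32KB = 1 << 15;
    // Mutable options are passed as name -> string value pairs.
    rocksdb::Status s =
        db->SetOptions({{"level0_file_num_compaction_trigger", "2"},
                        {"target_file_size_base", std::to_string(k32KB)}});
    assert(s.ok());
  }
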
@@ -5854,7 +5856,7 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
   // put some data
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
       ++n;
     }
     ASSERT_OK(Flush());
@@ -6238,7 +6240,7 @@ TEST_F(DBTest, LargeBatchWithColumnFamilies) {
             (write_size / 1024 / 1024), pass);
     for (;;) {
       std::string data(3000, j++ % 127 + 20);
-      data += ToString(j);
+      data += std::to_string(j);
       ASSERT_OK(batch.Put(handles_[0], Slice(data), Slice(data)));
       if (batch.GetDataSize() > write_size) {
         break;
@@ -38,10 +38,10 @@ class DBTest2 : public DBTestBase {
 #ifndef ROCKSDB_LITE
   uint64_t GetSstSizeHelper(Temperature temperature) {
     std::string prop;
-    EXPECT_TRUE(
-        dbfull()->GetProperty(DB::Properties::kLiveSstFilesSizeAtTemperature +
-                                  ToString(static_cast<uint8_t>(temperature)),
-                              &prop));
+    EXPECT_TRUE(dbfull()->GetProperty(
+        DB::Properties::kLiveSstFilesSizeAtTemperature +
+            std::to_string(static_cast<uint8_t>(temperature)),
+        &prop));
     return static_cast<uint64_t>(std::atoi(prop.c_str()));
   }
 #endif  // ROCKSDB_LITE
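GetSstSizeHelper works because the per-temperature size property is parameterized by appending the numeric Temperature value to the property name, which is exactly where std::to_string slots in. A sketch of the same lookup outside the test harness (the helper name is invented; assumes an open rocksdb::DB* db):

  #include <cstdlib>
  #include <string>
  #include "rocksdb/db.h"

  uint64_t WarmSstBytes(rocksdb::DB* db) {
    std::string prop;
    // Property name is kLiveSstFilesSizeAtTemperature + "<temperature id>".
    if (!db->GetProperty(
            rocksdb::DB::Properties::kLiveSstFilesSizeAtTemperature +
                std::to_string(
                    static_cast<uint8_t>(rocksdb::Temperature::kWarm)),
            &prop)) {
      return 0;
    }
    return static_cast<uint64_t>(std::atoll(prop.c_str()));
  }
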
@@ -1694,9 +1694,9 @@ class CompactionCompressionListener : public EventListener {
   int bottommost_level = 0;
   for (int level = 0; level < db->NumberLevels(); level++) {
     std::string files_at_level;
-    ASSERT_TRUE(db->GetProperty(
-        "rocksdb.num-files-at-level" + ROCKSDB_NAMESPACE::ToString(level),
-        &files_at_level));
+    ASSERT_TRUE(
+        db->GetProperty("rocksdb.num-files-at-level" + std::to_string(level),
+                        &files_at_level));
     if (files_at_level != "0") {
       bottommost_level = level;
     }
@@ -2492,14 +2492,14 @@ TEST_F(DBTest2, TestPerfContextIterCpuTime) {
 
   const size_t kNumEntries = 10;
   for (size_t i = 0; i < kNumEntries; ++i) {
-    ASSERT_OK(Put("k" + ToString(i), "v" + ToString(i)));
+    ASSERT_OK(Put("k" + std::to_string(i), "v" + std::to_string(i)));
   }
   ASSERT_OK(Flush());
   for (size_t i = 0; i < kNumEntries; ++i) {
-    ASSERT_EQ("v" + ToString(i), Get("k" + ToString(i)));
+    ASSERT_EQ("v" + std::to_string(i), Get("k" + std::to_string(i)));
   }
-  std::string last_key = "k" + ToString(kNumEntries - 1);
-  std::string last_value = "v" + ToString(kNumEntries - 1);
+  std::string last_key = "k" + std::to_string(kNumEntries - 1);
+  std::string last_value = "v" + std::to_string(kNumEntries - 1);
   env_->now_cpu_count_.store(0);
   env_->SetMockSleep();
 
@@ -5553,7 +5553,7 @@ TEST_F(DBTest2, MultiDBParallelOpenTest) {
   Options options = CurrentOptions();
   std::vector<std::string> dbnames;
   for (int i = 0; i < kNumDbs; ++i) {
-    dbnames.emplace_back(test::PerThreadDBPath(env_, "db" + ToString(i)));
+    dbnames.emplace_back(test::PerThreadDBPath(env_, "db" + std::to_string(i)));
     ASSERT_OK(DestroyDB(dbnames.back(), options));
   }
 
@@ -11,6 +11,8 @@
 
 #include "db/forward_iterator.h"
 #include "env/mock_env.h"
+#include "port/lang.h"
+#include "rocksdb/cache.h"
 #include "rocksdb/convenience.h"
 #include "rocksdb/env_encryption.h"
 #include "rocksdb/unique_id.h"
@@ -360,6 +362,17 @@ Options DBTestBase::GetOptions(
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
       "NewWritableFile:O_DIRECT");
 #endif
+  // kMustFreeHeapAllocations -> indicates ASAN build
+  if (kMustFreeHeapAllocations && !options_override.full_block_cache) {
+    // Detecting block cache use-after-free is normally difficult in unit
+    // tests, because as a cache, it tends to keep unreferenced entries in
+    // memory, and we normally want unit tests to take advantage of block
+    // cache for speed. However, we also want a strong chance of detecting
+    // block cache use-after-free in unit tests in ASAN builds, so for ASAN
+    // builds we use a trivially small block cache to which entries can be
+    // added but are immediately freed on no more references.
+    table_options.block_cache = NewLRUCache(/* too small */ 1);
+  }
 
   bool can_allow_mmap = IsMemoryMappedAccessSupported();
   switch (option_config) {
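The idea behind the new ASAN branch: a cache normally keeps unreferenced blocks alive, which masks use-after-free; with a capacity-1 LRU cache nothing fits in budget, so each block is freed as soon as its last reference is released and any later access trips ASAN. A minimal sketch of the same trick using the public API (the function name is invented):

  #include "rocksdb/cache.h"
  #include "rocksdb/table.h"

  rocksdb::BlockBasedTableOptions MakeUafHostileTableOptions() {
    rocksdb::BlockBasedTableOptions table_options;
    // Capacity of 1 byte: every inserted block is over budget, so it is
    // evicted (and deleted) the moment its last handle is released.
    table_options.block_cache = rocksdb::NewLRUCache(/* capacity */ 1);
    return table_options;
  }
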
@@ -831,7 +844,7 @@ std::vector<std::string> DBTestBase::MultiGet(std::vector<int> cfs,
   std::vector<Status> s;
   if (!batched) {
     s = db_->MultiGet(options, handles, keys, &result);
-    for (unsigned int i = 0; i < s.size(); ++i) {
+    for (size_t i = 0; i < s.size(); ++i) {
       if (s[i].IsNotFound()) {
         result[i] = "NOT_FOUND";
       } else if (!s[i].ok()) {
@@ -844,13 +857,16 @@ std::vector<std::string> DBTestBase::MultiGet(std::vector<int> cfs,
     s.resize(cfs.size());
     db_->MultiGet(options, cfs.size(), handles.data(), keys.data(),
                   pin_values.data(), s.data());
-    for (unsigned int i = 0; i < s.size(); ++i) {
+    for (size_t i = 0; i < s.size(); ++i) {
       if (s[i].IsNotFound()) {
         result[i] = "NOT_FOUND";
       } else if (!s[i].ok()) {
         result[i] = s[i].ToString();
       } else {
         result[i].assign(pin_values[i].data(), pin_values[i].size());
+        // Increase likelihood of detecting potential use-after-free bugs with
+        // PinnableSlices tracking the same resource
+        pin_values[i].Reset();
       }
     }
   }
@@ -863,23 +879,25 @@ std::vector<std::string> DBTestBase::MultiGet(const std::vector<std::string>& k,
   options.verify_checksums = true;
   options.snapshot = snapshot;
   std::vector<Slice> keys;
-  std::vector<std::string> result;
+  std::vector<std::string> result(k.size());
   std::vector<Status> statuses(k.size());
   std::vector<PinnableSlice> pin_values(k.size());
 
-  for (unsigned int i = 0; i < k.size(); ++i) {
+  for (size_t i = 0; i < k.size(); ++i) {
     keys.push_back(k[i]);
   }
   db_->MultiGet(options, dbfull()->DefaultColumnFamily(), keys.size(),
                 keys.data(), pin_values.data(), statuses.data());
-  result.resize(k.size());
-  for (auto iter = result.begin(); iter != result.end(); ++iter) {
-    iter->assign(pin_values[iter - result.begin()].data(),
-                 pin_values[iter - result.begin()].size());
-  }
-  for (unsigned int i = 0; i < statuses.size(); ++i) {
+  for (size_t i = 0; i < statuses.size(); ++i) {
     if (statuses[i].IsNotFound()) {
       result[i] = "NOT_FOUND";
+    } else if (!statuses[i].ok()) {
+      result[i] = statuses[i].ToString();
+    } else {
+      result[i].assign(pin_values[i].data(), pin_values[i].size());
+      // Increase likelihood of detecting potential use-after-free bugs with
+      // PinnableSlices tracking the same resource
+      pin_values[i].Reset();
     }
   }
   return result;
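The rewritten helper now writes each result straight into result[i] and calls PinnableSlice::Reset() so the pinned block is released immediately instead of staying pinned until the vector is destroyed; with the tiny ASAN block cache above, a stale pin would then be caught. A sketch of the same batched MultiGet shape outside the harness (the function name is invented; assumes an open rocksdb::DB* db):

  #include <string>
  #include <vector>
  #include "rocksdb/db.h"

  std::vector<std::string> GetAll(rocksdb::DB* db,
                                  const std::vector<rocksdb::Slice>& keys) {
    std::vector<rocksdb::PinnableSlice> pins(keys.size());
    std::vector<rocksdb::Status> statuses(keys.size());
    std::vector<std::string> out(keys.size());
    db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(),
                 keys.size(), keys.data(), pins.data(), statuses.data());
    for (size_t i = 0; i < keys.size(); ++i) {
      if (statuses[i].ok()) {
        out[i].assign(pins[i].data(), pins[i].size());
        pins[i].Reset();  // drop the pin as soon as the data is copied out
      }
    }
    return out;
  }
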
@@ -1068,12 +1086,12 @@ int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
   std::string property;
   if (cf == 0) {
     // default cfd
-    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
-                                 &property));
+    EXPECT_TRUE(db_->GetProperty(
+        "rocksdb.num-files-at-level" + std::to_string(level), &property));
   } else {
-    EXPECT_TRUE(db_->GetProperty(handles_[cf],
-                                 "rocksdb.num-files-at-level" + ToString(level),
-                                 &property));
+    EXPECT_TRUE(db_->GetProperty(
+        handles_[cf], "rocksdb.num-files-at-level" + std::to_string(level),
+        &property));
   }
   return atoi(property.c_str());
 }
@@ -1083,10 +1101,12 @@ double DBTestBase::CompressionRatioAtLevel(int level, int cf) {
   if (cf == 0) {
     // default cfd
     EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.compression-ratio-at-level" + ToString(level), &property));
+        "rocksdb.compression-ratio-at-level" + std::to_string(level),
+        &property));
   } else {
     EXPECT_TRUE(db_->GetProperty(
-        handles_[cf], "rocksdb.compression-ratio-at-level" + ToString(level),
-        &property));
+        handles_[cf],
+        "rocksdb.compression-ratio-at-level" + std::to_string(level),
+        &property));
   }
   return std::stod(property);
@@ -104,6 +104,9 @@ struct OptionsOverride {
   std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
   // These will be used only if filter_policy is set
   bool partition_filters = false;
+  // Force using a default block cache. (Setting to false allows ASAN build
+  // use a trivially small block cache for better UAF error detection.)
+  bool full_block_cache = false;
   uint64_t metadata_block_size = 1024;
 
   // Used as a bit mask of individual enums in which to skip an XF test point
@@ -549,7 +549,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
   ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
   Random rnd(301);
   for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize)));
+    ASSERT_OK(Put(1, std::to_string(key), rnd.RandomString(kTestValueSize)));
   }
   ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -287,7 +287,6 @@ TEST_F(DBWALTest, Recover) {
 
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
     ASSERT_EQ("v1", Get(1, "foo"));
-
     ASSERT_EQ("v1", Get(1, "foo"));
     ASSERT_EQ("v5", Get(1, "baz"));
     ASSERT_OK(Put(1, "bar", "v2"));
@@ -1010,7 +1009,7 @@ TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
     if (log_files.size() > 0) {
       earliest_log_nums[i] = log_files[0]->LogNumber();
     } else {
-      earliest_log_nums[i] = port::kMaxUint64;
+      earliest_log_nums[i] = std::numeric_limits<uint64_t>::max();
     }
   }
   // Check at least the first WAL was cleaned up during the recovery.
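port::kMaxUint64 was RocksDB's own portability constant; std::numeric_limits<uint64_t>::max() is the standard equivalent and needs only <limits>. A tiny sketch of the replacement (the constant name here is illustrative):

  #include <cstdint>
  #include <limits>

  // "No WAL seen yet" sentinel, in the spirit of the test above.
  constexpr uint64_t kNoWalYet = std::numeric_limits<uint64_t>::max();
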
@@ -1289,7 +1288,7 @@ class RecoveryTestHelper {
 
     WriteBatch batch;
     for (int i = 0; i < kKeysPerWALFile; i++) {
-      std::string key = "key" + ToString((*count)++);
+      std::string key = "key" + std::to_string((*count)++);
       std::string value = test->DummyString(kValueSize);
       ASSERT_NE(current_log_writer.get(), nullptr);
       uint64_t seq = versions->LastSequence() + 1;
@@ -1320,7 +1319,7 @@ class RecoveryTestHelper {
   static size_t GetData(DBWALTestBase* test) {
     size_t count = 0;
     for (size_t i = 0; i < kWALFilesCount * kKeysPerWALFile; i++) {
-      if (test->Get("key" + ToString(i)) != "NOT_FOUND") {
+      if (test->Get("key" + std::to_string(i)) != "NOT_FOUND") {
         ++count;
       }
     }
@@ -1617,7 +1616,7 @@ TEST_P(DBWALTestWithParams, kPointInTimeRecovery) {
   if (!trunc || corrupt_offset != 0) {
     bool expect_data = true;
     for (size_t k = 0; k < maxkeys; ++k) {
-      bool found = Get("key" + ToString(k)) != "NOT_FOUND";
+      bool found = Get("key" + std::to_string(k)) != "NOT_FOUND";
       if (expect_data && !found) {
         expect_data = false;
       }
@@ -1753,7 +1752,7 @@ TEST_F(DBWALTest, RecoverWithoutFlush) {
   size_t count = RecoveryTestHelper::FillData(this, &options);
   auto validateData = [this, count]() {
     for (size_t i = 0; i < count; i++) {
-      ASSERT_NE(Get("key" + ToString(i)), "NOT_FOUND");
+      ASSERT_NE(Get("key" + std::to_string(i)), "NOT_FOUND");
     }
   };
   Reopen(options);
@@ -1892,7 +1891,7 @@ TEST_P(DBWALTestWithParamsVaryingRecoveryMode,
   ASSERT_OK(TryReopen(options));
   // Append some more data.
   for (int k = 0; k < kAppendKeys; k++) {
-    std::string key = "extra_key" + ToString(k);
+    std::string key = "extra_key" + std::to_string(k);
     std::string value = DummyString(RecoveryTestHelper::kValueSize);
     ASSERT_OK(Put(key, value));
   }
@@ -1926,7 +1925,7 @@ TEST_F(DBWALTest, RestoreTotalLogSizeAfterRecoverWithoutFlush) {
   std::string value_300k(300 * kKB, 'v');
   ASSERT_OK(Put(0, "foo", "v1"));
   for (int i = 0; i < 9; i++) {
-    ASSERT_OK(Put(1, "key" + ToString(i), value_100k));
+    ASSERT_OK(Put(1, "key" + std::to_string(i), value_100k));
   }
   // Get log files before reopen.
   VectorLogPtr log_files_before;
@@ -1492,8 +1492,8 @@ TEST_F(DBBasicTestWithTimestamp, MultiGetRangeFiltering) {
 
   // random data
   for (int i = 0; i < 3; i++) {
-    auto key = ToString(i * 10);
-    auto value = ToString(i * 10);
+    auto key = std::to_string(i * 10);
+    auto value = std::to_string(i * 10);
     Slice key_slice = key;
     Slice value_slice = value;
     ASSERT_OK(db_->Put(write_opts, key_slice, ts, value_slice));
@@ -1824,8 +1824,8 @@ class DataVisibilityTest : public DBBasicTestWithTimestampBase {
   DataVisibilityTest() : DBBasicTestWithTimestampBase("data_visibility_test") {
     // Initialize test data
     for (int i = 0; i < kTestDataSize; i++) {
-      test_data_[i].key = "key" + ToString(i);
-      test_data_[i].value = "value" + ToString(i);
+      test_data_[i].key = "key" + std::to_string(i);
+      test_data_[i].value = "value" + std::to_string(i);
       test_data_[i].timestamp = Timestamp(i, 0);
       test_data_[i].ts = i;
       test_data_[i].seq_num = kMaxSequenceNumber;
@@ -289,7 +289,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) {
     threads.push_back(port::Thread(
         [&](int index) {
          // All threads should fail.
-          auto res = Put("key" + ToString(index), "value");
+          auto res = Put("key" + std::to_string(index), "value");
           if (options.manual_wal_flush) {
             ASSERT_TRUE(res.ok());
             // we should see fs error when we do the flush
@@ -322,13 +322,13 @@ TEST_P(DBWriteTest, ManualWalFlushInEffect) {
   Options options = GetOptions();
   Reopen(options);
   // try the 1st WAL created during open
-  ASSERT_TRUE(Put("key" + ToString(0), "value").ok());
+  ASSERT_TRUE(Put("key" + std::to_string(0), "value").ok());
   ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty());
   ASSERT_TRUE(dbfull()->FlushWAL(false).ok());
   ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty());
   // try the 2nd wal created during SwitchWAL
   ASSERT_OK(dbfull()->TEST_SwitchWAL());
-  ASSERT_TRUE(Put("key" + ToString(0), "value").ok());
+  ASSERT_TRUE(Put("key" + std::to_string(0), "value").ok());
   ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty());
   ASSERT_TRUE(dbfull()->FlushWAL(false).ok());
   ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty());
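ManualWalFlushInEffect exercises the contract the assertions above rely on: with manual_wal_flush set, writes accumulate in RocksDB's WAL buffer until FlushWAL() drains it. A minimal sketch (the function name is invented; assumes db was opened with Options::manual_wal_flush = true):

  #include "rocksdb/db.h"

  rocksdb::Status WriteThenFlushWal(rocksdb::DB* db) {
    rocksdb::Status s = db->Put(rocksdb::WriteOptions(), "key0", "value");
    if (!s.ok()) return s;
    // sync=false only empties RocksDB's own WAL buffer into the file;
    // pass true to also sync the file to storage.
    return db->FlushWAL(/* sync */ false);
  }
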
@@ -344,7 +344,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWriteTriggersReadOnlyMode) {
     // Forcibly fail WAL write for the first Put only. Subsequent Puts should
     // fail due to read-only mode
     mock_env->SetFilesystemActive(i != 0);
-    auto res = Put("key" + ToString(i), "value");
+    auto res = Put("key" + std::to_string(i), "value");
     // TSAN reports a false alarm for lock-order-inversion but Open and
     // FlushWAL are not run concurrently. Disabling this until TSAN is
     // fixed.
@@ -398,14 +398,14 @@ TEST_P(DBWriteTest, LockWalInEffect) {
   Options options = GetOptions();
   Reopen(options);
   // try the 1st WAL created during open
-  ASSERT_OK(Put("key" + ToString(0), "value"));
+  ASSERT_OK(Put("key" + std::to_string(0), "value"));
   ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty());
   ASSERT_OK(dbfull()->LockWAL());
   ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty(false));
   ASSERT_OK(dbfull()->UnlockWAL());
   // try the 2nd wal created during SwitchWAL
   ASSERT_OK(dbfull()->TEST_SwitchWAL());
-  ASSERT_OK(Put("key" + ToString(0), "value"));
+  ASSERT_OK(Put("key" + std::to_string(0), "value"));
   ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty());
   ASSERT_OK(dbfull()->LockWAL());
   ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty(false));
Some files were not shown because too many files have changed in this diff.