Compare commits
3 Commits
main
...
6.1.fb.pro
Author | SHA1 | Date | |
---|---|---|---|
|
8c9ac08735 | ||
|
f7e0545692 | ||
|
2c3eaeb4db |
@ -1,911 +0,0 @@
|
||||
version: 2.1
|
||||
|
||||
orbs:
|
||||
win: circleci/windows@2.4.0
|
||||
|
||||
aliases:
|
||||
- &notify-on-main-failure
|
||||
fail_only: true
|
||||
only_for_branches: main
|
||||
|
||||
commands:
|
||||
install-cmake-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install cmake on macos
|
||||
command: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake
|
||||
|
||||
install-jdk8-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install JDK 8 on macos
|
||||
command: |
|
||||
brew install --cask adoptopenjdk/openjdk/adoptopenjdk8
|
||||
|
||||
increase-max-open-files-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Increase max open files
|
||||
command: |
|
||||
sudo sysctl -w kern.maxfiles=1048576
|
||||
sudo sysctl -w kern.maxfilesperproc=1048576
|
||||
sudo launchctl limit maxfiles 1048576
|
||||
|
||||
pre-steps:
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Setup Environment Variables
|
||||
command: |
|
||||
echo "export GTEST_THROW_ON_FAILURE=0" >> $BASH_ENV
|
||||
echo "export GTEST_OUTPUT=\"xml:/tmp/test-results/\"" >> $BASH_ENV
|
||||
echo "export SKIP_FORMAT_BUCK_CHECKS=1" >> $BASH_ENV
|
||||
echo "export GTEST_COLOR=1" >> $BASH_ENV
|
||||
echo "export CTEST_OUTPUT_ON_FAILURE=1" >> $BASH_ENV
|
||||
echo "export CTEST_TEST_TIMEOUT=300" >> $BASH_ENV
|
||||
echo "export ZLIB_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zlib" >> $BASH_ENV
|
||||
echo "export BZIP2_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/bzip2" >> $BASH_ENV
|
||||
echo "export SNAPPY_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/snappy" >> $BASH_ENV
|
||||
echo "export LZ4_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/lz4" >> $BASH_ENV
|
||||
echo "export ZSTD_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zstd" >> $BASH_ENV
|
||||
|
||||
pre-steps-macos:
|
||||
steps:
|
||||
- pre-steps
|
||||
|
||||
post-steps:
|
||||
steps:
|
||||
- store_test_results: # store test result if there's any
|
||||
path: /tmp/test-results
|
||||
- store_artifacts: # store LOG for debugging if there's any
|
||||
path: LOG
|
||||
- run: # on fail, compress Test Logs for diagnosing the issue
|
||||
name: Compress Test Logs
|
||||
command: tar -cvzf t.tar.gz t
|
||||
when: on_fail
|
||||
- store_artifacts: # on fail, store Test Logs for diagnosing the issue
|
||||
path: t.tar.gz
|
||||
destination: test_logs
|
||||
when: on_fail
|
||||
|
||||
install-clang-10:
|
||||
steps:
|
||||
- run:
|
||||
name: Install Clang 10
|
||||
command: |
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
|
||||
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
|
||||
sudo apt-get update -y && sudo apt-get install -y clang-10
|
||||
|
||||
install-clang-13:
|
||||
steps:
|
||||
- run:
|
||||
name: Install Clang 13
|
||||
command: |
|
||||
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
|
||||
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
|
||||
sudo apt-get update -y && sudo apt-get install -y clang-13
|
||||
|
||||
install-gflags:
|
||||
steps:
|
||||
- run:
|
||||
name: Install gflags
|
||||
command: |
|
||||
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
||||
|
||||
install-benchmark:
|
||||
steps:
|
||||
- run:
|
||||
name: Install ninja build
|
||||
command: sudo apt-get update -y && sudo apt-get install -y ninja-build
|
||||
- run:
|
||||
name: Install benchmark
|
||||
command: |
|
||||
git clone --depth 1 --branch v1.6.1 https://github.com/google/benchmark.git ~/benchmark
|
||||
cd ~/benchmark && mkdir build && cd build
|
||||
cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0
|
||||
ninja && sudo ninja install
|
||||
|
||||
install-valgrind:
|
||||
steps:
|
||||
- run:
|
||||
name: Install valgrind
|
||||
command: sudo apt-get update -y && sudo apt-get install -y valgrind
|
||||
|
||||
upgrade-cmake:
|
||||
steps:
|
||||
- run:
|
||||
name: Upgrade cmake
|
||||
command: |
|
||||
sudo apt remove --purge cmake
|
||||
sudo snap install cmake --classic
|
||||
|
||||
install-gflags-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install gflags on macos
|
||||
command: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
|
||||
|
||||
install-gtest-parallel:
|
||||
steps:
|
||||
- run:
|
||||
name: Install gtest-parallel
|
||||
command: |
|
||||
git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel
|
||||
echo 'export PATH=$HOME/gtest-parallel:$PATH' >> $BASH_ENV
|
||||
|
||||
install-compression-libs:
|
||||
steps:
|
||||
- run:
|
||||
name: Install compression libs
|
||||
command: |
|
||||
sudo apt-get update -y && sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
|
||||
|
||||
install-libprotobuf-mutator:
|
||||
steps:
|
||||
- run:
|
||||
name: Install libprotobuf-mutator libs
|
||||
command: |
|
||||
git clone --single-branch --branch master --depth 1 git@github.com:google/libprotobuf-mutator.git ~/libprotobuf-mutator
|
||||
cd ~/libprotobuf-mutator && mkdir build && cd build
|
||||
cmake .. -GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON
|
||||
ninja && sudo ninja install
|
||||
- run:
|
||||
name: Setup environment variables
|
||||
command: |
|
||||
echo "export PKG_CONFIG_PATH=/usr/local/OFF/:~/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/" >> $BASH_ENV
|
||||
echo "export PROTOC_BIN=~/libprotobuf-mutator/build/external.protobuf/bin/protoc" >> $BASH_ENV
|
||||
|
||||
executors:
|
||||
windows-2xlarge:
|
||||
machine:
|
||||
image: 'windows-server-2019-vs2019:stable'
|
||||
resource_class: windows.2xlarge
|
||||
shell: bash.exe
|
||||
|
||||
jobs:
|
||||
build-macos:
|
||||
macos:
|
||||
xcode: 12.5.1
|
||||
resource_class: large
|
||||
environment:
|
||||
ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc causes env_test to hang, disable it for now
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- pre-steps-macos
|
||||
- run: ulimit -S -n `ulimit -H -n` && OPT=-DCIRCLECI make V=1 J=32 -j32 all
|
||||
- post-steps
|
||||
|
||||
build-macos-cmake:
|
||||
macos:
|
||||
xcode: 12.5.1
|
||||
resource_class: large
|
||||
parameters:
|
||||
run_even_tests:
|
||||
description: run even or odd tests, used to split tests to 2 groups
|
||||
type: boolean
|
||||
default: true
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-gflags-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "cmake generate project file"
|
||||
command: ulimit -S -n `ulimit -H -n` && mkdir build && cd build && cmake -DWITH_GFLAGS=1 ..
|
||||
- run:
|
||||
name: "Build tests"
|
||||
command: cd build && make V=1 -j32
|
||||
- when:
|
||||
condition: << parameters.run_even_tests >>
|
||||
steps:
|
||||
- run:
|
||||
name: "Run even tests"
|
||||
command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 0,,2
|
||||
- when:
|
||||
condition:
|
||||
not: << parameters.run_even_tests >>
|
||||
steps:
|
||||
- run:
|
||||
name: "Run odd tests"
|
||||
command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 1,,2
|
||||
- post-steps
|
||||
|
||||
build-linux:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: make V=1 J=32 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-encrypted_env-no_compression:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
|
||||
- run: |
|
||||
./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
|
||||
- post-steps
|
||||
|
||||
build-linux-shared_lib-alt_namespace-status_checked:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-release:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: make V=1 -j32 release
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
- install-gflags
|
||||
- run: make V=1 -j32 release
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
- post-steps
|
||||
|
||||
build-linux-release-rtti:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: make clean
|
||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
- run: sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
||||
- run: make clean
|
||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
|
||||
build-linux-lite:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: LITE=1 make V=1 J=8 -j8 check
|
||||
- post-steps
|
||||
|
||||
build-linux-lite-release:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: LITE=1 make V=1 -j8 release
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
- install-gflags
|
||||
- run: LITE=1 make V=1 -j8 release
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
- post-steps
|
||||
|
||||
build-linux-clang-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: sudo apt-get update -y && sudo apt-get install -y clang libgflags-dev libtbb-dev
|
||||
- run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-asan:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-clang-10
|
||||
- run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-mini-tsan:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
# find test list by `make list_all_tests`
|
||||
parameters:
|
||||
start_test:
|
||||
default: ""
|
||||
type: string
|
||||
end_test:
|
||||
default: ""
|
||||
type: string
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-clang-10
|
||||
- install-gtest-parallel
|
||||
- run:
|
||||
name: "Build unit tests"
|
||||
command: |
|
||||
echo "env: $(env)"
|
||||
ROCKSDBTESTS_START=<<parameters.start_test>> ROCKSDBTESTS_END=<<parameters.end_test>> ROCKSDBTESTS_SUBSET_TESTS_TO_FILE=/tmp/test_list COMPILE_WITH_TSAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 --output-sync=target build_subset_tests
|
||||
- run:
|
||||
name: "Run unit tests in parallel"
|
||||
command: |
|
||||
sed -i 's/[[:space:]]*$//; s/ / \.\//g; s/.*/.\/&/' /tmp/test_list
|
||||
cat /tmp/test_list
|
||||
gtest-parallel $(</tmp/test_list) --output_dir=/tmp | cat # pipe to cat to continuously output status on circleci UI. Otherwise, no status will be printed while the job is running.
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-ubsan:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-clang-10
|
||||
- run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
|
||||
- post-steps
|
||||
|
||||
build-linux-valgrind:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-valgrind
|
||||
- run: PORTABLE=1 make V=1 -j32 valgrind_test
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-clang-analyze:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-clang-10
|
||||
- run: sudo apt-get update -y && sudo apt-get install -y clang-tools-10
|
||||
- run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for a reason we haven't figured out. For an unknown reason, passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-folly:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- upgrade-cmake
|
||||
- run: make checkout_folly
|
||||
- run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-benchmark:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-benchmark
|
||||
- run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20)
|
||||
- post-steps
|
||||
|
||||
build-linux-unity-and-headers:
|
||||
docker: # executor type
|
||||
- image: gcc:latest
|
||||
environment:
|
||||
EXTRA_CXXFLAGS: -mno-avx512f # Warnings-as-error in avx512fintrin.h, would be used on newer hardware
|
||||
resource_class: large
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: apt-get update -y && apt-get install -y libgflags-dev
|
||||
- run: make V=1 -j8 unity_test
|
||||
- run: make V=1 -j8 -k check-headers # could be moved to a different build
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-7-with-folly:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-7 g++-7 libgflags-dev
|
||||
- run: make checkout_folly
|
||||
- run: USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-8-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-8 g++-8 libgflags-dev
|
||||
- run: CC=gcc-8 CXX=g++-8 V=1 make -j16 all
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-10-cxx20-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: sudo apt-get update -y && sudo apt-get install gcc-10 g++-10 libgflags-dev
|
||||
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j16 all
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-11-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-11 g++-11 libgflags-dev
|
||||
- install-benchmark
|
||||
- run: CC=gcc-11 CXX=g++-11 V=1 make -j16 all microbench
|
||||
- post-steps
|
||||
|
||||
build-linux-clang-13-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-clang-13
|
||||
- install-benchmark
|
||||
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j16 all microbench
|
||||
- post-steps
|
||||
|
||||
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
|
||||
build-linux-clang-13-asan-ubsan-with-folly:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-clang-13
|
||||
- install-gflags
|
||||
- run: make checkout_folly
|
||||
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
|
||||
- post-steps
|
||||
|
||||
# This job is only to make sure the microbench tests are able to run, the benchmark result is not meaningful as the CI host is changing.
|
||||
build-linux-run-microbench:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-benchmark
|
||||
- run: DEBUG_LEVEL=0 make -j32 run_microbench
|
||||
- post-steps
|
||||
|
||||
build-linux-mini-crashtest:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-compression-libs
|
||||
- run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
|
||||
- post-steps
|
||||
|
||||
build-windows:
|
||||
executor: windows-2xlarge
|
||||
parameters:
|
||||
extra_cmake_opt:
|
||||
default: ""
|
||||
type: string
|
||||
vs_year:
|
||||
default: "2019"
|
||||
type: string
|
||||
cmake_generator:
|
||||
default: "Visual Studio 16 2019"
|
||||
type: string
|
||||
environment:
|
||||
THIRDPARTY_HOME: C:/Users/circleci/thirdparty
|
||||
CMAKE_HOME: C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64
|
||||
CMAKE_BIN: C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64/bin/cmake.exe
|
||||
SNAPPY_HOME: C:/Users/circleci/thirdparty/snappy-1.1.7
|
||||
SNAPPY_INCLUDE: C:/Users/circleci/thirdparty/snappy-1.1.7;C:/Users/circleci/thirdparty/snappy-1.1.7/build
|
||||
SNAPPY_LIB_DEBUG: C:/Users/circleci/thirdparty/snappy-1.1.7/build/Debug/snappy.lib
|
||||
VS_YEAR: <<parameters.vs_year>>
|
||||
CMAKE_GENERATOR: <<parameters.cmake_generator>>
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: "Setup VS"
|
||||
command: |
|
||||
if [[ "${VS_YEAR}" == "2019" ]]; then
|
||||
echo "VS2019 already present."
|
||||
elif [[ "${VS_YEAR}" == "2017" ]]; then
|
||||
echo "Installing VS2017..."
|
||||
powershell .circleci/vs2017_install.ps1
|
||||
elif [[ "${VS_YEAR}" == "2015" ]]; then
|
||||
echo "Installing VS2015..."
|
||||
powershell .circleci/vs2015_install.ps1
|
||||
fi
|
||||
- store_artifacts:
|
||||
path: \Users\circleci\AppData\Local\Temp\vslogs.zip
|
||||
- run:
|
||||
name: "Install thirdparty dependencies"
|
||||
command: |
|
||||
mkdir ${THIRDPARTY_HOME}
|
||||
cd ${THIRDPARTY_HOME}
|
||||
echo "Installing CMake..."
|
||||
curl --fail --silent --show-error --output cmake-3.16.4-win64-x64.zip --location https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-win64-x64.zip
|
||||
unzip -q cmake-3.16.4-win64-x64.zip
|
||||
echo "Building Snappy dependency..."
|
||||
curl --fail --silent --show-error --output snappy-1.1.7.zip --location https://github.com/google/snappy/archive/1.1.7.zip
|
||||
unzip -q snappy-1.1.7.zip
|
||||
cd snappy-1.1.7
|
||||
mkdir build
|
||||
cd build
|
||||
${CMAKE_BIN} -G "${CMAKE_GENERATOR}" ..
|
||||
msbuild.exe Snappy.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
|
||||
- run:
|
||||
name: "Build RocksDB"
|
||||
command: |
|
||||
mkdir build
|
||||
cd build
|
||||
${CMAKE_BIN} -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=Debug -DOPTDBG=1 -DPORTABLE=1 -DSNAPPY=1 -DJNI=1 << parameters.extra_cmake_opt >> ..
|
||||
cd ..
|
||||
echo "Building with VS version: ${CMAKE_GENERATOR}"
|
||||
msbuild.exe build/rocksdb.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
|
||||
- run:
|
||||
name: "Test RocksDB"
|
||||
shell: powershell.exe
|
||||
command: |
|
||||
build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
|
||||
|
||||
build-linux-java:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Test RocksDBJava"
|
||||
command: make V=1 J=8 -j8 jtest
|
||||
- post-steps
|
||||
|
||||
build-linux-java-static:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava Static Library"
|
||||
command: make V=1 J=8 -j8 rocksdbjavastatic
|
||||
- post-steps
|
||||
|
||||
build-macos-java:
|
||||
macos:
|
||||
xcode: 12.5.1
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
|
||||
ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc causes java 8 crash
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Test RocksDBJava"
|
||||
command: make V=1 J=16 -j16 jtest
|
||||
- post-steps
|
||||
|
||||
build-macos-java-static:
|
||||
macos:
|
||||
xcode: 12.5.1
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava x86 and ARM Static Libraries"
|
||||
command: make V=1 J=16 -j16 rocksdbjavastaticosx
|
||||
- post-steps
|
||||
|
||||
build-macos-java-static-universal:
|
||||
macos:
|
||||
xcode: 12.5.1
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava Universal Binary Static Library"
|
||||
command: make V=1 J=16 -j16 rocksdbjavastaticosx_ub
|
||||
- post-steps
|
||||
|
||||
build-examples:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run:
|
||||
name: "Build examples"
|
||||
command: |
|
||||
OPT=-DTRAVIS V=1 make -j4 static_lib && cd examples && make -j4
|
||||
- post-steps
|
||||
|
||||
build-cmake-mingw:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: sudo apt-get update -y && sudo apt-get install -y mingw-w64
|
||||
- run: sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
|
||||
- run:
|
||||
name: "Build cmake-mingw"
|
||||
command: |
|
||||
sudo apt-get install snapd && sudo snap install cmake --beta --classic
|
||||
export PATH=/snap/bin:$PATH
|
||||
sudo apt-get install -y openjdk-8-jdk
|
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
|
||||
export PATH=$JAVA_HOME/bin:$PATH
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni
|
||||
- post-steps
|
||||
|
||||
build-linux-non-shm:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
environment:
|
||||
TEST_TMPDIR: /tmp/rocksdb_test_tmp
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: make V=1 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-arm-test-full:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: make V=1 J=4 -j4 check
|
||||
- post-steps
|
||||
|
||||
build-linux-arm:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: ROCKSDBTESTS_PLATFORM_DEPENDENT=only make V=1 J=4 -j4 all_but_some_tests check_some
|
||||
- post-steps
|
||||
|
||||
build-linux-arm-cmake-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
environment:
|
||||
JAVA_HOME: /usr/lib/jvm/java-8-openjdk-arm64
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build with cmake"
|
||||
command: |
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=1 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 ..
|
||||
make -j4
|
||||
- run:
|
||||
name: "Build Java with cmake"
|
||||
command: |
|
||||
rm -rf build
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DJNI=1 -DCMAKE_BUILD_TYPE=Release -DWITH_GFLAGS=1 ..
|
||||
make -j4 rocksdb rocksdbjni
|
||||
- post-steps
|
||||
|
||||
build-format-compatible:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- install-compression-libs
|
||||
- run:
|
||||
name: "test"
|
||||
command: |
|
||||
export TEST_TMPDIR=/dev/shm/rocksdb
|
||||
rm -rf /dev/shm/rocksdb
|
||||
mkdir /dev/shm/rocksdb
|
||||
tools/check_format_compatible.sh
|
||||
- post-steps
|
||||
|
||||
build-fuzzers:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-clang-13
|
||||
- run: sudo apt-get update -y && sudo apt-get install -y cmake ninja-build binutils liblzma-dev libz-dev pkg-config autoconf libtool
|
||||
- install-libprotobuf-mutator
|
||||
- run:
|
||||
name: "Build rocksdb lib"
|
||||
command: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
|
||||
- run:
|
||||
name: "Build fuzzers"
|
||||
command: cd fuzz && make sst_file_writer_fuzzer db_fuzzer db_map_fuzzer
|
||||
- post-steps
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
jobs-linux-run-tests:
|
||||
jobs:
|
||||
- build-linux
|
||||
- build-linux-cmake-with-folly
|
||||
- build-linux-gcc-7-with-folly
|
||||
- build-linux-cmake-with-benchmark
|
||||
- build-linux-encrypted_env-no_compression
|
||||
- build-linux-lite
|
||||
jobs-linux-run-tests-san:
|
||||
jobs:
|
||||
- build-linux-clang10-asan
|
||||
- build-linux-clang10-ubsan
|
||||
- build-linux-clang10-mini-tsan:
|
||||
start_test: ""
|
||||
end_test: "env_test"
|
||||
- build-linux-clang10-mini-tsan:
|
||||
start_test: "env_test"
|
||||
end_test: ""
|
||||
- build-linux-shared_lib-alt_namespace-status_checked
|
||||
jobs-linux-no-test-run:
|
||||
jobs:
|
||||
- build-linux-release
|
||||
- build-linux-release-rtti
|
||||
- build-linux-lite-release
|
||||
- build-examples
|
||||
- build-fuzzers
|
||||
- build-linux-clang-no_test_run
|
||||
- build-linux-clang-13-no_test_run
|
||||
- build-linux-gcc-8-no_test_run
|
||||
- build-linux-gcc-10-cxx20-no_test_run
|
||||
- build-linux-gcc-11-no_test_run
|
||||
- build-linux-arm-cmake-no_test_run
|
||||
jobs-linux-other-checks:
|
||||
jobs:
|
||||
- build-linux-clang10-clang-analyze
|
||||
- build-linux-unity-and-headers
|
||||
- build-linux-mini-crashtest
|
||||
jobs-windows:
|
||||
jobs:
|
||||
- build-windows:
|
||||
name: "build-windows-vs2019"
|
||||
- build-windows:
|
||||
name: "build-windows-vs2019-cxx20"
|
||||
extra_cmake_opt: -DCMAKE_CXX_STANDARD=20
|
||||
- build-windows:
|
||||
name: "build-windows-vs2017"
|
||||
vs_year: "2017"
|
||||
cmake_generator: "Visual Studio 15 Win64"
|
||||
- build-cmake-mingw
|
||||
jobs-java:
|
||||
jobs:
|
||||
- build-linux-java
|
||||
- build-linux-java-static
|
||||
- build-macos-java
|
||||
- build-macos-java-static
|
||||
- build-macos-java-static-universal
|
||||
jobs-macos:
|
||||
jobs:
|
||||
- build-macos
|
||||
- build-macos-cmake:
|
||||
run_even_tests: true
|
||||
- build-macos-cmake:
|
||||
run_even_tests: false
|
||||
jobs-linux-arm:
|
||||
jobs:
|
||||
- build-linux-arm
|
||||
nightly:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 9 * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- main
|
||||
jobs:
|
||||
- build-format-compatible
|
||||
- build-linux-arm-test-full
|
||||
- build-linux-run-microbench
|
||||
- build-linux-non-shm
|
||||
- build-linux-clang-13-asan-ubsan-with-folly
|
||||
- build-linux-valgrind
|
@ -1,6 +0,0 @@
|
||||
# Supress UBSAN warnings related to stl_tree.h, e.g.
|
||||
# UndefinedBehaviorSanitizer: undefined-behavior /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43 in
|
||||
# /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43:
|
||||
# runtime error: upcast of address 0x000001fa8820 with insufficient space for an object of type
|
||||
# 'std::_Rb_tree_node<std::pair<const std::__cxx11::basic_string<char>, rocksdb::(anonymous namespace)::LockHoldingInfo> >'
|
||||
src:*bits/stl_tree.h
|
@ -1,24 +0,0 @@
|
||||
$VS_DOWNLOAD_LINK = "https://go.microsoft.com/fwlink/?LinkId=691126"
|
||||
$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe"
|
||||
curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
echo "Download of the VS 2015 installer failed"
|
||||
exit 1
|
||||
}
|
||||
$VS_INSTALL_ARGS = @("/Quiet", "/NoRestart")
|
||||
$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
|
||||
Remove-Item -Path vs_installer.exe -Force
|
||||
$exitCode = $process.ExitCode
|
||||
if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
|
||||
echo "VS 2015 installer exited with code $exitCode, which should be one of [0, 3010]."
|
||||
curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
echo "Download of the VS Collect tool failed."
|
||||
exit 1
|
||||
}
|
||||
Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru
|
||||
New-Item -Path "C:\w\build-results" -ItemType "directory" -Force
|
||||
Copy-Item -Path "C:\Users\circleci\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\"
|
||||
exit 1
|
||||
}
|
||||
echo "VS 2015 installed."
|
@ -1,35 +0,0 @@
|
||||
$VS_DOWNLOAD_LINK = "https://aka.ms/vs/15/release/vs_buildtools.exe"
|
||||
$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe"
|
||||
$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Tools.14.13",
|
||||
"--add Microsoft.Component.MSBuild",
|
||||
"--add Microsoft.VisualStudio.Component.Roslyn.Compiler",
|
||||
"--add Microsoft.VisualStudio.Component.TextTemplating",
|
||||
"--add Microsoft.VisualStudio.Component.VC.CoreIde",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
|
||||
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
|
||||
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81")
|
||||
|
||||
curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
echo "Download of the VS 2017 installer failed"
|
||||
exit 1
|
||||
}
|
||||
|
||||
$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
|
||||
Remove-Item -Path vs_installer.exe -Force
|
||||
$exitCode = $process.ExitCode
|
||||
if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
|
||||
echo "VS 2017 installer exited with code $exitCode, which should be one of [0, 3010]."
|
||||
curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
echo "Download of the VS Collect tool failed."
|
||||
exit 1
|
||||
}
|
||||
Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru
|
||||
New-Item -Path "C:\w\build-results" -ItemType "directory" -Force
|
||||
Copy-Item -Path "C:\Users\circleci\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\"
|
||||
exit 1
|
||||
}
|
||||
echo "VS 2017 installed."
|
44
.github/workflows/sanity_check.yml
vendored
44
.github/workflows/sanity_check.yml
vendored
@ -1,44 +0,0 @@
|
||||
name: Check buck targets and code format
|
||||
on: [push, pull_request]
|
||||
jobs:
|
||||
check:
|
||||
name: Check TARGETS file and code format
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout feature branch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Fetch from upstream
|
||||
run: |
|
||||
git remote add upstream https://github.com/facebook/rocksdb.git && git fetch upstream
|
||||
|
||||
- name: Where am I
|
||||
run: |
|
||||
echo git status && git status
|
||||
echo "git remote -v" && git remote -v
|
||||
echo git branch && git branch
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
|
||||
- name: Install Dependencies
|
||||
run: python -m pip install --upgrade pip
|
||||
|
||||
- name: Install argparse
|
||||
run: pip install argparse
|
||||
|
||||
- name: Download clang-format-diff.py
|
||||
uses: wei/wget@v1
|
||||
with:
|
||||
args: https://raw.githubusercontent.com/llvm/llvm-project/release/12.x/clang/tools/clang-format/clang-format-diff.py
|
||||
|
||||
- name: Check format
|
||||
run: VERBOSE_CHECK=1 make check-format
|
||||
|
||||
- name: Compare buckify output
|
||||
run: make check-buck-targets
|
||||
|
||||
- name: Simple source code checks
|
||||
run: make check-sources
|
22
.gitignore
vendored
22
.gitignore
vendored
@ -1,5 +1,4 @@
|
||||
make_config.mk
|
||||
rocksdb.pc
|
||||
|
||||
*.a
|
||||
*.arc
|
||||
@ -8,7 +7,6 @@ rocksdb.pc
|
||||
*.gcda
|
||||
*.gcno
|
||||
*.o
|
||||
*.o.tmp
|
||||
*.so
|
||||
*.so.*
|
||||
*_test
|
||||
@ -26,7 +24,6 @@ rocksdb.pc
|
||||
*.vcxproj.filters
|
||||
*.sln
|
||||
*.cmake
|
||||
.watchmanconfig
|
||||
CMakeCache.txt
|
||||
CMakeFiles/
|
||||
build/
|
||||
@ -35,9 +32,6 @@ ldb
|
||||
manifest_dump
|
||||
sst_dump
|
||||
blob_dump
|
||||
block_cache_trace_analyzer
|
||||
db_with_timestamp_basic_test
|
||||
tools/block_cache_analyzer/*.pyc
|
||||
column_aware_encoding_exp
|
||||
util/build_version.cc
|
||||
build_tools/VALGRIND_LOGS/
|
||||
@ -53,11 +47,6 @@ rocksdb_undump
|
||||
db_test2
|
||||
trace_analyzer
|
||||
trace_analyzer_test
|
||||
block_cache_trace_analyzer
|
||||
io_tracer_parser
|
||||
.DS_Store
|
||||
.vs
|
||||
.vscode
|
||||
|
||||
java/out
|
||||
java/target
|
||||
@ -85,14 +74,3 @@ tp2/
|
||||
fbcode/
|
||||
fbcode
|
||||
buckifier/*.pyc
|
||||
buckifier/__pycache__
|
||||
|
||||
compile_commands.json
|
||||
clang-format-diff.py
|
||||
.py3/
|
||||
|
||||
fuzz/proto/gen/
|
||||
fuzz/crash-*
|
||||
|
||||
cmake-build-*
|
||||
third-party/folly/
|
||||
|
247
.travis.yml
247
.travis.yml
@ -1,31 +1,31 @@
|
||||
dist: xenial
|
||||
sudo: false
|
||||
dist: trusty
|
||||
language: cpp
|
||||
os:
|
||||
- linux
|
||||
arch:
|
||||
- arm64
|
||||
- ppc64le
|
||||
- s390x
|
||||
- osx
|
||||
compiler:
|
||||
- clang
|
||||
- gcc
|
||||
osx_image: xcode8.3
|
||||
jdk:
|
||||
- oraclejdk7
|
||||
cache:
|
||||
- ccache
|
||||
- apt
|
||||
|
||||
addons:
|
||||
apt:
|
||||
update: true
|
||||
sources:
|
||||
- ubuntu-toolchain-r-test
|
||||
packages:
|
||||
- libgflags-dev
|
||||
- curl
|
||||
- g++-8
|
||||
- libbz2-dev
|
||||
- liblz4-dev
|
||||
- libgflags-dev
|
||||
- libsnappy-dev
|
||||
- liblzma-dev # xv
|
||||
- libzstd-dev
|
||||
- mingw-w64
|
||||
- zlib1g-dev
|
||||
|
||||
env:
|
||||
- TEST_GROUP=platform_dependent # 16-18 minutes
|
||||
- TEST_GROUP=1 # 33-35 minutes
|
||||
@ -40,186 +40,38 @@ env:
|
||||
- JOB_NAME=examples # 5-7 minutes
|
||||
- JOB_NAME=cmake # 3-5 minutes
|
||||
- JOB_NAME=cmake-gcc8 # 3-5 minutes
|
||||
- JOB_NAME=cmake-gcc9 # 3-5 minutes
|
||||
- JOB_NAME=cmake-gcc9-c++20 # 3-5 minutes
|
||||
- JOB_NAME=cmake-mingw # 3 minutes
|
||||
|
||||
matrix:
|
||||
exclude:
|
||||
- os: osx
|
||||
env: TEST_GROUP=1
|
||||
- os: osx
|
||||
env: TEST_GROUP=2
|
||||
- os: osx
|
||||
env: TEST_GROUP=3
|
||||
- os: osx
|
||||
env: TEST_GROUP=4
|
||||
- os: osx
|
||||
env: JOB_NAME=cmake-gcc8
|
||||
- os : osx
|
||||
env: JOB_NAME=cmake-mingw
|
||||
- os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=cmake-mingw
|
||||
- os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=cmake-mingw
|
||||
- os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=cmake-mingw
|
||||
- os: linux
|
||||
compiler: clang
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: arm64
|
||||
env: TEST_GROUP=platform_dependent
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: TEST_GROUP=1
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: TEST_GROUP=1
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: TEST_GROUP=1
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: TEST_GROUP=2
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: TEST_GROUP=2
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: TEST_GROUP=2
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: TEST_GROUP=3
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: TEST_GROUP=3
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: TEST_GROUP=3
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: TEST_GROUP=4
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: TEST_GROUP=4
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: TEST_GROUP=4
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=cmake
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=java_test
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=java_test
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=java_test
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=lite_build
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=lite_build
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=lite_build
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=examples
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=examples
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=examples
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=cmake-gcc8
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=cmake-gcc8
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=cmake-gcc8
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=cmake-gcc9
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=cmake-gcc9
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=cmake-gcc9
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os : linux
|
||||
arch: arm64
|
||||
env: JOB_NAME=cmake-gcc9-c++20
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: ppc64le
|
||||
env: JOB_NAME=cmake-gcc9-c++20
|
||||
- if: type = pull_request AND commit_message !~ /FULL_CI/
|
||||
os: linux
|
||||
arch: s390x
|
||||
env: JOB_NAME=cmake-gcc9-c++20
|
||||
- os : osx
|
||||
compiler: gcc
|
||||
|
||||
# https://docs.travis-ci.com/user/caching/#ccache-cache
|
||||
install:
|
||||
- CC=gcc-7 && CXX=g++-7
|
||||
- if [ "${TRAVIS_OS_NAME}" == osx ]; then
|
||||
brew install ccache zstd lz4 snappy xz;
|
||||
PATH=$PATH:/usr/local/opt/ccache/libexec;
|
||||
fi
|
||||
- if [ "${JOB_NAME}" == cmake-gcc8 ]; then
|
||||
sudo apt-get install -y g++-8 || exit $?;
|
||||
CC=gcc-8 && CXX=g++-8;
|
||||
fi
|
||||
- if [ "${JOB_NAME}" == cmake-gcc9 ] || [ "${JOB_NAME}" == cmake-gcc9-c++20 ]; then
|
||||
sudo apt-get install -y g++-9 || exit $?;
|
||||
CC=gcc-9 && CXX=g++-9;
|
||||
fi
|
||||
- if [ "${JOB_NAME}" == cmake-mingw ]; then
|
||||
sudo apt-get install -y mingw-w64 || exit $?;
|
||||
fi
|
||||
- if [ "${CXX}" == "g++-7" ]; then
|
||||
sudo apt-get install -y g++-7 || exit $?;
|
||||
fi
|
||||
- |
|
||||
if [[ "${JOB_NAME}" == cmake* ]]; then
|
||||
sudo apt-get remove -y cmake cmake-data
|
||||
export CMAKE_DEB="cmake-3.14.5-Linux-$(uname -m).deb"
|
||||
export CMAKE_DEB_URL="https://rocksdb-deps.s3-us-west-2.amazonaws.com/cmake/${CMAKE_DEB}"
|
||||
curl --silent --fail --show-error --location --output "${CMAKE_DEB}" "${CMAKE_DEB_URL}" || exit $?
|
||||
sudo dpkg -i "${CMAKE_DEB}" || exit $?
|
||||
which cmake && cmake --version
|
||||
fi
|
||||
- |
|
||||
if [[ "${JOB_NAME}" == java_test || "${JOB_NAME}" == cmake* ]]; then
|
||||
# Ensure JDK 8
|
||||
sudo apt-get install -y openjdk-8-jdk || exit $?
|
||||
export PATH=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)/bin:$PATH
|
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- if [[ "${JOB_NAME}" == cmake* ]] && [ "${TRAVIS_OS_NAME}" == linux ]; then
|
||||
mkdir cmake-dist && curl -sfSL https://cmake.org/files/v3.8/cmake-3.8.1-Linux-x86_64.tar.gz | tar --strip-components=1 -C cmake-dist -xz && export PATH=$PWD/cmake-dist/bin:$PATH;
|
||||
fi
|
||||
|
||||
before_script:
|
||||
@ -228,53 +80,44 @@ before_script:
|
||||
- ulimit -n 8192
|
||||
|
||||
script:
|
||||
- date; ${CXX} --version
|
||||
- ${CXX} --version
|
||||
- if [ `command -v ccache` ]; then ccache -C; fi
|
||||
- export MK_PARALLEL=4;
|
||||
if [[ "$TRAVIS_CPU_ARCH" == s390x ]]; then
|
||||
export MK_PARALLEL=1;
|
||||
fi
|
||||
- case $TEST_GROUP in
|
||||
platform_dependent)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=only make -j$MK_PARALLEL all_but_some_tests check_some
|
||||
OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 all_but_some_tests check_some
|
||||
;;
|
||||
1)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_END=backup_engine_test make -j$MK_PARALLEL check_some
|
||||
OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=full_filter_block_test make -j4 check_some
|
||||
;;
|
||||
2)
|
||||
OPT="-DTRAVIS -DROCKSDB_NAMESPACE=alternative_rocksdb_ns" LIB_MODE=shared V=1 make -j$MK_PARALLEL tools && OPT="-DTRAVIS -DROCKSDB_NAMESPACE=alternative_rocksdb_ns" LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=backup_engine_test ROCKSDBTESTS_END=db_universal_compaction_test make -j$MK_PARALLEL check_some
|
||||
OPT=-DTRAVIS V=1 make -j4 tools && OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=full_filter_block_test ROCKSDBTESTS_END=write_batch_with_index_test make -j4 check_some
|
||||
;;
|
||||
3)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=db_universal_compaction_test ROCKSDBTESTS_END=table_properties_collector_test make -j$MK_PARALLEL check_some
|
||||
OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=write_batch_with_index_test ROCKSDBTESTS_END=write_prepared_transaction_test make -j4 check_some
|
||||
;;
|
||||
4)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=table_properties_collector_test make -j$MK_PARALLEL check_some
|
||||
OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=write_prepared_transaction_test make -j4 check_some
|
||||
;;
|
||||
esac
|
||||
- case $JOB_NAME in
|
||||
java_test)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 make rocksdbjava jtest
|
||||
OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest
|
||||
;;
|
||||
lite_build)
|
||||
OPT='-DTRAVIS -DROCKSDB_LITE' LIB_MODE=shared V=1 make -j$MK_PARALLEL all
|
||||
OPT='-DTRAVIS -DROCKSDB_LITE' V=1 make -j4 static_lib tools
|
||||
;;
|
||||
examples)
|
||||
OPT=-DTRAVIS LIB_MODE=shared V=1 make -j$MK_PARALLEL static_lib && cd examples && make -j$MK_PARALLEL
|
||||
OPT=-DTRAVIS V=1 make -j4 static_lib && cd examples && make -j4
|
||||
;;
|
||||
cmake-mingw)
|
||||
sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix;
|
||||
mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni
|
||||
mkdir build && cd build && cmake -DJNI=1 .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni
|
||||
;;
|
||||
cmake*)
|
||||
case $JOB_NAME in
|
||||
*-c++20)
|
||||
OPT=-DCMAKE_CXX_STANDARD=20
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=0 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 .. && make -j$MK_PARALLEL && cd .. && rm -rf build && mkdir build && cd build && cmake -DJNI=1 .. -DCMAKE_BUILD_TYPE=Release $OPT && make -j$MK_PARALLEL rocksdb rocksdbjni
|
||||
mkdir build && cd build && cmake -DJNI=1 .. -DCMAKE_BUILD_TYPE=Release && make -j4 rocksdb rocksdbjni
|
||||
;;
|
||||
esac
|
||||
notifications:
|
||||
email:
|
||||
- leveldb@fb.com
|
||||
webhooks:
|
||||
- https://buildtimetrend.herokuapp.com/travis
|
||||
|
@ -1,6 +0,0 @@
|
||||
{
|
||||
"content_hash_warming": true,
|
||||
"content_hash_max_items": 333333,
|
||||
"hint_num_files_per_dir": 8,
|
||||
"fsevents_latency": 0.05
|
||||
}
|
1066
CMakeLists.txt
1066
CMakeLists.txt
File diff suppressed because it is too large
Load Diff
@ -1,77 +1,3 @@
|
||||
# Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to make participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all project spaces, and it also applies when
|
||||
an individual is representing the project or its community in public spaces.
|
||||
Examples of representing a project or community include using an official
|
||||
project e-mail address, posting via an official social media account, or acting
|
||||
as an appointed representative at an online or offline event. Representation of
|
||||
a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at <opensource-conduct@fb.com>. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
|
||||
Facebook has adopted a Code of Conduct that we expect project participants to adhere to. Please [read the full text](https://code.facebook.com/codeofconduct) so that you can understand what actions will and will not be tolerated.
|
||||
|
@ -1,4 +1,4 @@
|
||||
# RocksDB default options change log (NO LONGER MAINTAINED)
|
||||
# RocksDB default options change log
|
||||
## Unreleased
|
||||
* delayed_write_rate takes the rate given by rate_limiter if not specified.
|
||||
|
||||
|
1222
HISTORY.md
1222
HISTORY.md
File diff suppressed because it is too large
Load Diff
48
INSTALL.md
48
INSTALL.md
@ -6,7 +6,7 @@ than release mode.
|
||||
|
||||
RocksDB's library should be able to compile without any dependency installed,
|
||||
although we recommend installing some compression libraries (see below).
|
||||
We do depend on newer gcc/clang with C++17 support (GCC >= 7, Clang >= 5).
|
||||
We do depend on newer gcc/clang with C++11 support.
|
||||
|
||||
There are few options when compiling RocksDB:
|
||||
|
||||
@ -43,16 +43,12 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
command line flags processing. You can compile rocksdb library even
|
||||
if you don't have gflags installed.
|
||||
|
||||
* `make check` will also check code formatting, which requires [clang-format](https://clang.llvm.org/docs/ClangFormat.html)
|
||||
|
||||
* If you wish to build the RocksJava static target, then cmake is required for building Snappy.
|
||||
|
||||
* If you wish to run microbench (e.g, `make microbench`, `make ribbon_bench` or `cmake -DWITH_BENCHMARK=1`), Google benchmark >= 1.6.0 is needed.
|
||||
|
||||
## Supported platforms
|
||||
|
||||
* **Linux - Ubuntu**
|
||||
* Upgrade your gcc to version at least 7 to get C++17 support.
|
||||
* Upgrade your gcc to version at least 4.8 to get C++11 support.
|
||||
* Install gflags. First, try: `sudo apt-get install libgflags-dev`
|
||||
If this doesn't work and you're using Ubuntu, here's a nice tutorial:
|
||||
(http://askubuntu.com/questions/312173/installing-gflags-12-04)
|
||||
@ -64,7 +60,8 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
* Install zstandard: `sudo apt-get install libzstd-dev`.
|
||||
|
||||
* **Linux - CentOS / RHEL**
|
||||
* Upgrade your gcc to version at least 7 to get C++17 support
|
||||
* Upgrade your gcc to version at least 4.8 to get C++11 support:
|
||||
`yum install gcc48-c++`
|
||||
* Install gflags:
|
||||
|
||||
git clone https://github.com/gflags/gflags.git
|
||||
@ -97,28 +94,19 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
sudo yum install libasan
|
||||
|
||||
* Install zstandard:
|
||||
* With [EPEL](https://fedoraproject.org/wiki/EPEL):
|
||||
|
||||
sudo yum install libzstd-devel
|
||||
|
||||
* With CentOS 8:
|
||||
|
||||
sudo dnf install libzstd-devel
|
||||
|
||||
* From source:
|
||||
|
||||
wget https://github.com/facebook/zstd/archive/v1.1.3.tar.gz
|
||||
mv v1.1.3.tar.gz zstd-1.1.3.tar.gz
|
||||
tar zxvf zstd-1.1.3.tar.gz
|
||||
cd zstd-1.1.3
|
||||
make && sudo make install
|
||||
wget https://github.com/facebook/zstd/archive/v1.1.3.tar.gz
|
||||
mv v1.1.3.tar.gz zstd-1.1.3.tar.gz
|
||||
tar zxvf zstd-1.1.3.tar.gz
|
||||
cd zstd-1.1.3
|
||||
make && sudo make install
|
||||
|
||||
* **OS X**:
|
||||
* Install latest C++ compiler that supports C++ 17:
|
||||
* Install latest C++ compiler that supports C++ 11:
|
||||
* Update XCode: run `xcode-select --install` (or install it from XCode App's settting).
|
||||
* Install via [homebrew](http://brew.sh/).
|
||||
* If you're first time developer in MacOS, you still need to run: `xcode-select --install` in your command line.
|
||||
* run `brew tap homebrew/versions; brew install gcc7 --use-llvm` to install gcc 7 (or higher).
|
||||
* run `brew tap homebrew/versions; brew install gcc48 --use-llvm` to install gcc 4.8 (or higher).
|
||||
* run `brew install rocksdb`
|
||||
|
||||
* **FreeBSD** (11.01):
|
||||
@ -161,7 +149,7 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
|
||||
* Install the dependencies for RocksDB:
|
||||
|
||||
pkg_add gmake gflags snappy bzip2 lz4 zstd git jdk bash findutils gnuwatch
|
||||
pkg_add gmake gflags snappy bzip2 lz4 zstd git jdk bash findutils gnuwatch
|
||||
|
||||
* Build RocksDB from source:
|
||||
|
||||
@ -180,15 +168,16 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
* **iOS**:
|
||||
* Run: `TARGET_OS=IOS make static_lib`. When building the project which uses rocksdb iOS library, make sure to define two important pre-processing macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
|
||||
|
||||
* **Windows** (Visual Studio 2017 to up):
|
||||
* **Windows**:
|
||||
* For building with MS Visual Studio 13 you will need Update 4 installed.
|
||||
* Read and follow the instructions at CMakeLists.txt
|
||||
* Or install via [vcpkg](https://github.com/microsoft/vcpkg)
|
||||
* Or install via [vcpkg](https://github.com/microsoft/vcpkg)
|
||||
* run `vcpkg install rocksdb:x64-windows`
|
||||
|
||||
* **AIX 6.1**
|
||||
* Install AIX Toolbox rpms with gcc
|
||||
* Use these environment variables:
|
||||
|
||||
|
||||
export PORTABLE=1
|
||||
export CC=gcc
|
||||
export AR="ar -X64"
|
||||
@ -199,9 +188,9 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
export LIBPATH=/opt/freeware/lib
|
||||
export JAVA_HOME=/usr/java8_64
|
||||
export PATH=/opt/freeware/bin:$PATH
|
||||
|
||||
|
||||
* **Solaris Sparc**
|
||||
* Install GCC 7 and higher.
|
||||
* Install GCC 4.8.2 and higher.
|
||||
* Use these environment variables:
|
||||
|
||||
export CC=gcc
|
||||
@ -210,3 +199,4 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
|
||||
export EXTRA_LDFLAGS=-m64
|
||||
export PORTABLE=1
|
||||
export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
This is the list of all known third-party language bindings for RocksDB. If something is missing, please open a pull request to add it.
|
||||
|
||||
* Java - https://github.com/facebook/rocksdb/tree/main/java
|
||||
* Java - https://github.com/facebook/rocksdb/tree/master/java
|
||||
* Python
|
||||
* http://python-rocksdb.readthedocs.io/en/latest/
|
||||
* http://pyrocksdb.readthedocs.org/en/latest/ (unmaintained)
|
||||
@ -10,9 +10,7 @@ This is the list of all known third-party language bindings for RocksDB. If some
|
||||
* Ruby - http://rubygems.org/gems/rocksdb-ruby
|
||||
* Haskell - https://hackage.haskell.org/package/rocksdb-haskell
|
||||
* PHP - https://github.com/Photonios/rocksdb-php
|
||||
* C#
|
||||
* https://github.com/warrenfalk/rocksdb-sharp
|
||||
* https://github.com/curiosity-ai/rocksdb-sharp
|
||||
* C# - https://github.com/warrenfalk/rocksdb-sharp
|
||||
* Rust
|
||||
* https://github.com/pingcap/rust-rocksdb (used in production fork of https://github.com/spacejam/rust-rocksdb)
|
||||
* https://github.com/spacejam/rust-rocksdb
|
||||
|
@ -1,7 +0,0 @@
|
||||
This is the list of all known third-party plugins for RocksDB. If something is missing, please open a pull request to add it.
|
||||
|
||||
* [Dedupfs](https://github.com/ajkr/dedupfs): an example for plugin developers to reference
|
||||
* [HDFS](https://github.com/riversand963/rocksdb-hdfs-env): an Env used for interacting with HDFS. Migrated from main RocksDB repo
|
||||
* [ZenFS](https://github.com/westerndigitalcorporation/zenfs): a file system for zoned block devices
|
||||
* [RADOS](https://github.com/riversand963/rocksdb-rados-env): an Env used for interacting with RADOS. Migrated from RocksDB main repo.
|
||||
* [PMEM](https://github.com/pmem/pmem-rocksdb-plugin): a collection of plugins to enable Persistent Memory on RocksDB.
|
15
README.md
15
README.md
@ -1,23 +1,22 @@
|
||||
## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage
|
||||
|
||||
[![CircleCI Status](https://circleci.com/gh/facebook/rocksdb.svg?style=svg)](https://circleci.com/gh/facebook/rocksdb)
|
||||
[![TravisCI Status](https://api.travis-ci.com/facebook/rocksdb.svg?branch=main)](https://travis-ci.com/github/facebook/rocksdb)
|
||||
[![Appveyor Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/main?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/main)
|
||||
[![PPC64le Build Status](http://140-211-168-68-openstack.osuosl.org:8080/buildStatus/icon?job=rocksdb&style=plastic)](http://140-211-168-68-openstack.osuosl.org:8080/job/rocksdb)
|
||||
[![Linux/Mac Build Status](https://travis-ci.org/facebook/rocksdb.svg?branch=master)](https://travis-ci.org/facebook/rocksdb)
|
||||
[![Windows Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/master?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/master)
|
||||
[![PPC64le Build Status](http://140.211.168.68:8080/buildStatus/icon?job=Rocksdb)](http://140.211.168.68:8080/job/Rocksdb)
|
||||
|
||||
RocksDB is developed and maintained by Facebook Database Engineering Team.
|
||||
It is built on earlier work on [LevelDB](https://github.com/google/leveldb) by Sanjay Ghemawat (sanjay@google.com)
|
||||
and Jeff Dean (jeff@google.com)
|
||||
|
||||
This code is a library that forms the core building block for a fast
|
||||
key-value server, especially suited for storing data on flash drives.
|
||||
key value server, especially suited for storing data on flash drives.
|
||||
It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
|
||||
between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
|
||||
and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
|
||||
making it especially suitable for storing multiple terabytes of data in a
|
||||
making it specially suitable for storing multiple terabytes of data in a
|
||||
single database.
|
||||
|
||||
Start with example usage here: https://github.com/facebook/rocksdb/tree/main/examples
|
||||
Start with example usage here: https://github.com/facebook/rocksdb/tree/master/examples
|
||||
|
||||
See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.
|
||||
|
||||
@ -25,7 +24,7 @@ The public interface is in `include/`. Callers should not include or
|
||||
rely on the details of any other header files in this package. Those
|
||||
internal APIs may be changed without warning.
|
||||
|
||||
Questions and discussions are welcome on the [RocksDB Developers Public](https://www.facebook.com/groups/rocksdb.dev/) Facebook group and [email list](https://groups.google.com/g/rocksdb) on Google Groups.
|
||||
Design discussions are conducted in https://www.facebook.com/groups/rocksdb.dev/
|
||||
|
||||
## License
|
||||
|
||||
|
@ -4,7 +4,7 @@ RocksDBLite is a project focused on mobile use cases, which don't need a lot of
|
||||
|
||||
Some examples of the features disabled by ROCKSDB_LITE:
|
||||
* compiled-in support for LDB tool
|
||||
* No backup engine
|
||||
* No backupable DB
|
||||
* No support for replication (which we provide in form of TransactionalIterator)
|
||||
* No advanced monitoring tools
|
||||
* No special-purpose memtables that are highly optimized for specific use cases
|
||||
|
42
USERS.md
42
USERS.md
@ -26,9 +26,6 @@ Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasu
|
||||
## Yahoo
|
||||
Yahoo is using RocksDB as a storage engine for their biggest distributed data store Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights
|
||||
|
||||
## Baidu
|
||||
[Apache Doris](http://doris.apache.org/master/en/) is a MPP analytical database engine released by Baidu. It [uses RocksDB](http://doris.apache.org/master/en/administrator-guide/operation/tablet-meta-tool.html) to manage its tablet's metadata.
|
||||
|
||||
## CockroachDB
|
||||
CockroachDB is an open-source geo-replicated transactional database. They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach
|
||||
|
||||
@ -47,16 +44,12 @@ Tango is using RocksDB as a graph storage to store all users' connection data an
|
||||
Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
|
||||
Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf
|
||||
|
||||
## Santander UK/Cloudera Profession Services
|
||||
## Santanader UK/Cloudera Profession Services
|
||||
Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/
|
||||
|
||||
## Airbnb
|
||||
Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs
|
||||
|
||||
## Alluxio
|
||||
[Alluxio](https://www.alluxio.io) uses RocksDB to serve and scale file system metadata to beyond 1 Billion files. The detailed design and implementation is described in this engineering blog:
|
||||
https://www.alluxio.io/blog/scalable-metadata-service-in-alluxio-storing-billions-of-files/
|
||||
|
||||
## Pinterest
|
||||
Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo
|
||||
|
||||
@ -70,7 +63,7 @@ Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtub
|
||||
[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.
|
||||
|
||||
## quasardb
|
||||
[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark.
|
||||
[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark.
|
||||
quasardb uses a heavily tuned RocksDB as its persistence layer.
|
||||
|
||||
## Netflix
|
||||
@ -89,7 +82,7 @@ quasardb uses a heavily tuned RocksDB as its persistence layer.
|
||||
[Uber](http://eng.uber.com/cherami/) uses RocksDB as a durable and scalable task queue.
|
||||
|
||||
## 360 Pika
|
||||
[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a nosql compatible with redis. With the huge amount of data stored, redis may suffer for a capacity bottleneck, and pika was born for solving it. It has widely been used in many companies.
|
||||
[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a nosql compatible with redis. With the huge amount of data stored, redis may suffer for a capacity bottleneck, and pika was born for solving it. It has widely been widely used in many company
|
||||
|
||||
## LzLabs
|
||||
LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data.
|
||||
@ -98,31 +91,4 @@ LzLabs is using RocksDB as a storage engine in their multi-database distributed
|
||||
[ProfaneDB](https://profanedb.gitlab.io/) is a database for Protocol Buffers, and uses RocksDB for storage. It is accessible via gRPC, and the schema is defined using directly `.proto` files.
|
||||
|
||||
## IOTA Foundation
|
||||
[IOTA Foundation](https://www.iota.org/) is using RocksDB in the [IOTA Reference Implementation (IRI)](https://github.com/iotaledger/iri) to store the local state of the Tangle. The Tangle is the first open-source distributed ledger powering the future of the Internet of Things.
|
||||
|
||||
## Avrio Project
|
||||
[Avrio Project](http://avrio-project.github.io/avrio.network/) is using RocksDB in [Avrio ](https://github.com/avrio-project/avrio) to store blocks, account balances and data and other blockchain-releated data. Avrio is a multiblockchain decentralized cryptocurrency empowering monetary transactions.
|
||||
|
||||
## Crux
|
||||
[Crux](https://github.com/juxt/crux) is a document database that uses RocksDB for local [EAV](https://en.wikipedia.org/wiki/Entity%E2%80%93attribute%E2%80%93value_model) index storage to enable point-in-time bitemporal Datalog queries. The "unbundled" architecture uses Kafka to provide horizontal scalability.
|
||||
|
||||
## Nebula Graph
|
||||
[Nebula Graph](https://github.com/vesoft-inc/nebula) is a distributed, scalable, lightning-fast, open source graph database capable of hosting super large scale graphs with dozens of billions of vertices (nodes) and trillions of edges, with milliseconds of latency.
|
||||
|
||||
## YugabyteDB
|
||||
[YugabyteDB](https://www.yugabyte.com/) is an open source, high performance, distributed SQL database that uses RocksDB as its storage layer. For more information, please see https://github.com/yugabyte/yugabyte-db/.
|
||||
|
||||
## ArangoDB
|
||||
[ArangoDB](https://www.arangodb.com/) is a native multi-model database with flexible data models for documents, graphs, and key-values, for building high performance applications using a convenient SQL-like query language or JavaScript extensions. It uses RocksDB as its storage engine.
|
||||
|
||||
## Milvus
|
||||
[Milvus](https://milvus.io/) is an open source vector database for unstructured data. It uses RocksDB not only as one of the supported kv storage engines, but also as a message queue.
|
||||
|
||||
## Kafka
|
||||
[Kafka](https://kafka.apache.org/) is an open-source distributed event streaming platform, it uses RocksDB to store state in Kafka Streams: https://www.confluent.io/blog/how-to-tune-rocksdb-kafka-streams-state-stores-performance/.
|
||||
|
||||
## Solana Labs
|
||||
[Solana](https://github.com/solana-labs/solana) is a fast, secure, scalable, and decentralized blockchain. It uses RocksDB as the underlying storage for its ledger store.
|
||||
|
||||
## Others
|
||||
More databases using RocksDB can be found at [dbdb.io](https://dbdb.io/browse?embeds=rocksdb).
|
||||
[IOTA Foundation](https://www.iota.org/) is using RocksDB in the [IOTA Reference Implementation (IRI)](https://github.com/iotaledger/iri) to store the local state of the Tangle. The Tangle is the first open-source distributed ledger powering the future of the Internet of Things.
|
@ -24,7 +24,7 @@ We strive to achieve the following goals:
|
||||
* make all unit test pass both in debug and release builds.
|
||||
* Note: latest introduction of SyncPoint seems to disable running db_test in Release.
|
||||
* make performance on par with published benchmarks accounting for HW differences
|
||||
* we would like to keep the port code inline with the main branch with no forking
|
||||
* we would like to keep the port code inline with the master branch with no forking
|
||||
|
||||
## Build system
|
||||
We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient.
|
||||
@ -66,7 +66,7 @@ We endeavored to make it functionally on par with posix_env. This means we repli
|
||||
Even though Windows provides its own efficient thread-pool implementation we chose to replicate posix logic using `std::thread` primitives. This allows anyone to quickly detect any changes within the posix source code and replicate them within windows env. This has proven to work very well. At the same time for anyone who wishes to replace the built-in thread-pool can do so using RocksDB stackable environments.
|
||||
|
||||
For disk access we implemented all of the functionality present within the posix_env which includes memory mapped files, random access, rate-limiter support etc.
|
||||
The `use_os_buffer` flag on Posix platforms currently denotes disabling read-ahead log via `fadvise` mechanism. Windows does not have `fadvise` system call. What is more, it implements disk cache in a way that differs from Linux greatly. It's not an uncommon practice on Windows to perform un-buffered disk access to gain control of the memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput. To compensate one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows to perform writes and reads in cases when un-buffered access does not make sense such as WAL and MANIFEST.
|
||||
The `use_os_buffer` flag on Posix platforms currently denotes disabling read-ahead log via `fadvise` mechanism. Windows does not have `fadvise` system call. What is more, it implements disk cache in a way that differs from Linux greatly. It’s not an uncommon practice on Windows to perform un-buffered disk access to gain control of the memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput. To compensate one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows to perform writes and reads in cases when un-buffered access does not make sense such as WAL and MANIFEST.
|
||||
|
||||
We have replaced `pread/pwrite` with `WriteFile/ReadFile` with `OVERLAPPED` structure so we can atomically seek to the position of the disk operation but still perform the operation synchronously. Thus we able to emulate that functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position but that hardly matters given the random nature of access.
|
||||
|
||||
|
15
appveyor.yml
Normal file
15
appveyor.yml
Normal file
@ -0,0 +1,15 @@
|
||||
version: 1.0.{build}
|
||||
image: Visual Studio 2017
|
||||
before_build:
|
||||
- md %APPVEYOR_BUILD_FOLDER%\build
|
||||
- cd %APPVEYOR_BUILD_FOLDER%\build
|
||||
- cmake -G "Visual Studio 15 Win64" -DOPTDBG=1 -DWITH_XPRESS=1 -DPORTABLE=1 -DJNI=1 ..
|
||||
- cd ..
|
||||
build:
|
||||
project: build\rocksdb.sln
|
||||
parallel: true
|
||||
verbosity: normal
|
||||
test:
|
||||
test_script:
|
||||
- ps: build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test2,db_test,env_basic_test,env_test -Concurrency 8
|
||||
|
File diff suppressed because it is too large
Load Diff
1594
buckifier/bench.json
1594
buckifier/bench.json
File diff suppressed because it is too large
Load Diff
244
buckifier/buckify_rocksdb.py
Executable file → Normal file
244
buckifier/buckify_rocksdb.py
Executable file → Normal file
@ -1,39 +1,15 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
try:
|
||||
from builtins import str
|
||||
except ImportError:
|
||||
from __builtin__ import str
|
||||
from targets_builder import TARGETSBuilder
|
||||
import json
|
||||
import os
|
||||
import fnmatch
|
||||
import sys
|
||||
|
||||
from util import ColorString
|
||||
|
||||
# This script generates TARGETS file for Buck.
|
||||
# Buck is a build tool specifying dependencies among different build targets.
|
||||
# User can pass extra dependencies as a JSON object via command line, and this
|
||||
# script can include these dependencies in the generate TARGETS file.
|
||||
# Usage:
|
||||
# $python3 buckifier/buckify_rocksdb.py
|
||||
# (This generates a TARGET file without user-specified dependency for unit
|
||||
# tests.)
|
||||
# $python3 buckifier/buckify_rocksdb.py \
|
||||
# '{"fake": {
|
||||
# "extra_deps": [":test_dep", "//fakes/module:mock1"],
|
||||
# "extra_compiler_flags": ["-DROCKSDB_LITE", "-Os"]
|
||||
# }
|
||||
# }'
|
||||
# (Generated TARGETS file has test_dep and mock1 as dependencies for RocksDB
|
||||
# unit tests, and will use the extra_compiler_flags to compile the unit test
|
||||
# source.)
|
||||
|
||||
# tests to export as libraries for inclusion in other projects
|
||||
_EXPORTED_TEST_LIBS = ["env_basic_test"]
|
||||
|
||||
@ -49,8 +25,8 @@ def parse_src_mk(repo_path):
|
||||
if '=' in line:
|
||||
current_src = line.split('=')[0].strip()
|
||||
src_files[current_src] = []
|
||||
elif '.c' in line:
|
||||
src_path = line.split('\\')[0].strip()
|
||||
elif '.cc' in line:
|
||||
src_path = line.split('.cc')[0].strip() + '.cc'
|
||||
src_files[current_src].append(src_path)
|
||||
return src_files
|
||||
|
||||
@ -70,93 +46,65 @@ def get_cc_files(repo_path):
|
||||
return cc_files
|
||||
|
||||
|
||||
# Get non_parallel tests from Makefile
|
||||
def get_non_parallel_tests(repo_path):
|
||||
# Get tests from Makefile
|
||||
def get_tests(repo_path):
|
||||
Makefile = repo_path + "/Makefile"
|
||||
|
||||
s = set({})
|
||||
# Dictionary TEST_NAME => IS_PARALLEL
|
||||
tests = {}
|
||||
|
||||
found_non_parallel_tests = False
|
||||
found_tests = False
|
||||
for line in open(Makefile):
|
||||
line = line.strip()
|
||||
if line.startswith("NON_PARALLEL_TEST ="):
|
||||
found_non_parallel_tests = True
|
||||
elif found_non_parallel_tests:
|
||||
if line.startswith("TESTS ="):
|
||||
found_tests = True
|
||||
elif found_tests:
|
||||
if line.endswith("\\"):
|
||||
# remove the trailing \
|
||||
line = line[:-1]
|
||||
line = line.strip()
|
||||
s.add(line)
|
||||
tests[line] = False
|
||||
else:
|
||||
# we consumed all the non_parallel tests
|
||||
# we consumed all the tests
|
||||
break
|
||||
|
||||
return s
|
||||
found_parallel_tests = False
|
||||
for line in open(Makefile):
|
||||
line = line.strip()
|
||||
if line.startswith("PARALLEL_TEST ="):
|
||||
found_parallel_tests = True
|
||||
elif found_parallel_tests:
|
||||
if line.endswith("\\"):
|
||||
# remove the trailing \
|
||||
line = line[:-1]
|
||||
line = line.strip()
|
||||
tests[line] = True
|
||||
else:
|
||||
# we consumed all the parallel tests
|
||||
break
|
||||
|
||||
# Parse extra dependencies passed by user from command line
|
||||
def get_dependencies():
|
||||
deps_map = {
|
||||
'': {
|
||||
'extra_deps': [],
|
||||
'extra_compiler_flags': []
|
||||
}
|
||||
}
|
||||
if len(sys.argv) < 2:
|
||||
return deps_map
|
||||
|
||||
def encode_dict(data):
|
||||
rv = {}
|
||||
for k, v in data.items():
|
||||
if isinstance(v, dict):
|
||||
v = encode_dict(v)
|
||||
rv[k] = v
|
||||
return rv
|
||||
extra_deps = json.loads(sys.argv[1], object_hook=encode_dict)
|
||||
for target_alias, deps in extra_deps.items():
|
||||
deps_map[target_alias] = deps
|
||||
return deps_map
|
||||
return tests
|
||||
|
||||
|
||||
# Prepare TARGETS file for buck
|
||||
def generate_targets(repo_path, deps_map):
|
||||
def generate_targets(repo_path):
|
||||
print(ColorString.info("Generating TARGETS"))
|
||||
# parsed src.mk file
|
||||
src_mk = parse_src_mk(repo_path)
|
||||
# get all .cc files
|
||||
cc_files = get_cc_files(repo_path)
|
||||
# get non_parallel tests from Makefile
|
||||
non_parallel_tests = get_non_parallel_tests(repo_path)
|
||||
# get tests from Makefile
|
||||
tests = get_tests(repo_path)
|
||||
|
||||
if src_mk is None or cc_files is None or non_parallel_tests is None:
|
||||
if src_mk is None or cc_files is None or tests is None:
|
||||
return False
|
||||
|
||||
extra_argv = ""
|
||||
if len(sys.argv) >= 2:
|
||||
# Heuristically quote and canonicalize whitespace for inclusion
|
||||
# in how the file was generated.
|
||||
extra_argv = " '{0}'".format(" ".join(sys.argv[1].split()))
|
||||
|
||||
TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path, extra_argv)
|
||||
|
||||
TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path)
|
||||
# rocksdb_lib
|
||||
TARGETS.add_library(
|
||||
"rocksdb_lib",
|
||||
src_mk["LIB_SOURCES"] +
|
||||
# always add range_tree, it's only excluded on ppc64, which we don't use internally
|
||||
src_mk["RANGE_TREE_SOURCES"] +
|
||||
src_mk["TOOL_LIB_SOURCES"],
|
||||
deps=["//folly/container:f14_hash"])
|
||||
# rocksdb_whole_archive_lib
|
||||
TARGETS.add_library(
|
||||
"rocksdb_whole_archive_lib",
|
||||
src_mk["LIB_SOURCES"] +
|
||||
# always add range_tree, it's only excluded on ppc64, which we don't use internally
|
||||
src_mk["RANGE_TREE_SOURCES"] +
|
||||
src_mk["TOOL_LIB_SOURCES"],
|
||||
deps=["//folly/container:f14_hash"],
|
||||
headers=None,
|
||||
extra_external_deps="",
|
||||
link_whole=True)
|
||||
src_mk["TOOL_LIB_SOURCES"])
|
||||
# rocksdb_test_lib
|
||||
TARGETS.add_library(
|
||||
"rocksdb_test_lib",
|
||||
@ -164,118 +112,34 @@ def generate_targets(repo_path, deps_map):
|
||||
src_mk.get("TEST_LIB_SOURCES", []) +
|
||||
src_mk.get("EXP_LIB_SOURCES", []) +
|
||||
src_mk.get("ANALYZER_LIB_SOURCES", []),
|
||||
[":rocksdb_lib"],
|
||||
extra_test_libs=True
|
||||
)
|
||||
[":rocksdb_lib"])
|
||||
# rocksdb_tools_lib
|
||||
TARGETS.add_library(
|
||||
"rocksdb_tools_lib",
|
||||
src_mk.get("BENCH_LIB_SOURCES", []) +
|
||||
src_mk.get("ANALYZER_LIB_SOURCES", []) +
|
||||
["test_util/testutil.cc"],
|
||||
["util/testutil.cc"],
|
||||
[":rocksdb_lib"])
|
||||
# rocksdb_cache_bench_tools_lib
|
||||
TARGETS.add_library(
|
||||
"rocksdb_cache_bench_tools_lib",
|
||||
src_mk.get("CACHE_BENCH_LIB_SOURCES", []),
|
||||
[":rocksdb_lib"])
|
||||
# rocksdb_stress_lib
|
||||
TARGETS.add_rocksdb_library(
|
||||
"rocksdb_stress_lib",
|
||||
src_mk.get("ANALYZER_LIB_SOURCES", [])
|
||||
+ src_mk.get('STRESS_LIB_SOURCES', [])
|
||||
+ ["test_util/testutil.cc"])
|
||||
# db_stress binary
|
||||
TARGETS.add_binary("db_stress",
|
||||
["db_stress_tool/db_stress.cc"],
|
||||
[":rocksdb_stress_lib"])
|
||||
# bench binaries
|
||||
for src in src_mk.get("MICROBENCH_SOURCES", []):
|
||||
name = src.rsplit('/',1)[1].split('.')[0] if '/' in src else src.split('.')[0]
|
||||
TARGETS.add_binary(
|
||||
name,
|
||||
[src],
|
||||
[],
|
||||
extra_bench_libs=True
|
||||
)
|
||||
print("Extra dependencies:\n{0}".format(json.dumps(deps_map)))
|
||||
|
||||
# Dictionary test executable name -> relative source file path
|
||||
test_source_map = {}
|
||||
# test for every test we found in the Makefile
|
||||
for test in sorted(tests):
|
||||
match_src = [src for src in cc_files if ("/%s.c" % test) in src]
|
||||
if len(match_src) == 0:
|
||||
print(ColorString.warning("Cannot find .cc file for %s" % test))
|
||||
continue
|
||||
elif len(match_src) > 1:
|
||||
print(ColorString.warning("Found more than one .cc for %s" % test))
|
||||
print(match_src)
|
||||
continue
|
||||
|
||||
# c_test.c is added through TARGETS.add_c_test(). If there
|
||||
# are more than one .c test file, we need to extend
|
||||
# TARGETS.add_c_test() to include other C tests too.
|
||||
for test_src in src_mk.get("TEST_MAIN_SOURCES_C", []):
|
||||
if test_src != 'db/c_test.c':
|
||||
print("Don't know how to deal with " + test_src)
|
||||
return False
|
||||
TARGETS.add_c_test()
|
||||
assert(len(match_src) == 1)
|
||||
is_parallel = tests[test]
|
||||
TARGETS.register_test(test, match_src[0], is_parallel)
|
||||
|
||||
try:
|
||||
with open(f"{repo_path}/buckifier/bench.json") as json_file:
|
||||
fast_fancy_bench_config_list = json.load(json_file)
|
||||
for config_dict in fast_fancy_bench_config_list:
|
||||
clean_benchmarks = {}
|
||||
benchmarks = config_dict['benchmarks']
|
||||
for binary, benchmark_dict in benchmarks.items():
|
||||
clean_benchmarks[binary] = {}
|
||||
for benchmark, overloaded_metric_list in benchmark_dict.items():
|
||||
clean_benchmarks[binary][benchmark] = []
|
||||
for metric in overloaded_metric_list:
|
||||
if not isinstance(metric, dict):
|
||||
clean_benchmarks[binary][benchmark].append(metric)
|
||||
TARGETS.add_fancy_bench_config(config_dict['name'], clean_benchmarks, False, config_dict['expected_runtime_one_iter'], config_dict['sl_iterations'], config_dict['regression_threshold'])
|
||||
|
||||
with open(f"{repo_path}/buckifier/bench-slow.json") as json_file:
|
||||
slow_fancy_bench_config_list = json.load(json_file)
|
||||
for config_dict in slow_fancy_bench_config_list:
|
||||
clean_benchmarks = {}
|
||||
benchmarks = config_dict['benchmarks']
|
||||
for binary, benchmark_dict in benchmarks.items():
|
||||
clean_benchmarks[binary] = {}
|
||||
for benchmark, overloaded_metric_list in benchmark_dict.items():
|
||||
clean_benchmarks[binary][benchmark] = []
|
||||
for metric in overloaded_metric_list:
|
||||
if not isinstance(metric, dict):
|
||||
clean_benchmarks[binary][benchmark].append(metric)
|
||||
for config_dict in slow_fancy_bench_config_list:
|
||||
TARGETS.add_fancy_bench_config(config_dict['name']+"_slow", clean_benchmarks, True, config_dict['expected_runtime_one_iter'], config_dict['sl_iterations'], config_dict['regression_threshold'])
|
||||
# it is better servicelab experiments break
|
||||
# than rocksdb github ci
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
TARGETS.add_test_header()
|
||||
|
||||
for test_src in src_mk.get("TEST_MAIN_SOURCES", []):
|
||||
test = test_src.split('.c')[0].strip().split('/')[-1].strip()
|
||||
test_source_map[test] = test_src
|
||||
print("" + test + " " + test_src)
|
||||
|
||||
for target_alias, deps in deps_map.items():
|
||||
for test, test_src in sorted(test_source_map.items()):
|
||||
if len(test) == 0:
|
||||
print(ColorString.warning("Failed to get test name for %s" % test_src))
|
||||
continue
|
||||
|
||||
test_target_name = \
|
||||
test if not target_alias else test + "_" + target_alias
|
||||
|
||||
if test in _EXPORTED_TEST_LIBS:
|
||||
test_library = "%s_lib" % test_target_name
|
||||
TARGETS.add_library(test_library, [test_src], deps=[":rocksdb_test_lib"], extra_test_libs=True)
|
||||
TARGETS.register_test(
|
||||
test_target_name,
|
||||
test_src,
|
||||
deps = json.dumps(deps['extra_deps'] + [':'+test_library]),
|
||||
extra_compiler_flags = json.dumps(deps['extra_compiler_flags']))
|
||||
else:
|
||||
TARGETS.register_test(
|
||||
test_target_name,
|
||||
test_src,
|
||||
deps = json.dumps(deps['extra_deps'] + [":rocksdb_test_lib"] ),
|
||||
extra_compiler_flags = json.dumps(deps['extra_compiler_flags']))
|
||||
if test in _EXPORTED_TEST_LIBS:
|
||||
test_library = "%s_lib" % test
|
||||
TARGETS.add_library(test_library, match_src, [":rocksdb_test_lib"])
|
||||
TARGETS.flush_tests()
|
||||
|
||||
print(ColorString.info("Generated TARGETS Summary:"))
|
||||
print(ColorString.info("- %d libs" % TARGETS.total_lib))
|
||||
@ -293,16 +157,14 @@ def get_rocksdb_path():
|
||||
|
||||
return rocksdb_path
|
||||
|
||||
|
||||
def exit_with_error(msg):
|
||||
print(ColorString.error(msg))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
deps_map = get_dependencies()
|
||||
# Generate TARGETS file for buck
|
||||
ok = generate_targets(get_rocksdb_path(), deps_map)
|
||||
ok = generate_targets(get_rocksdb_path())
|
||||
if not ok:
|
||||
exit_with_error("Failed to generate TARGETS files")
|
||||
|
||||
|
@ -1,32 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
# If clang_format_diff.py command is not specfied, we assume we are able to
|
||||
# access directly without any path.
|
||||
|
||||
TGT_DIFF=`git diff TARGETS | head -n 1`
|
||||
|
||||
if [ ! -z "$TGT_DIFF" ]
|
||||
then
|
||||
echo "TARGETS file has uncommitted changes. Skip this check."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo Backup original TARGETS file.
|
||||
|
||||
cp TARGETS TARGETS.bkp
|
||||
|
||||
${PYTHON:-python3} buckifier/buckify_rocksdb.py
|
||||
|
||||
TGT_DIFF=`git diff TARGETS | head -n 1`
|
||||
|
||||
if [ -z "$TGT_DIFF" ]
|
||||
then
|
||||
mv TARGETS.bkp TARGETS
|
||||
exit 0
|
||||
else
|
||||
echo "Please run '${PYTHON:-python3} buckifier/buckify_rocksdb.py' to update TARGETS file."
|
||||
echo "Do not manually update TARGETS file."
|
||||
${PYTHON:-python3} --version
|
||||
mv TARGETS.bkp TARGETS
|
||||
exit 1
|
||||
fi
|
@ -3,14 +3,7 @@ from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
try:
|
||||
from builtins import object
|
||||
from builtins import str
|
||||
except ImportError:
|
||||
from __builtin__ import object
|
||||
from __builtin__ import str
|
||||
import targets_cfg
|
||||
import pprint
|
||||
|
||||
def pretty_list(lst, indent=8):
|
||||
if lst is None or len(lst) == 0:
|
||||
@ -25,13 +18,11 @@ def pretty_list(lst, indent=8):
|
||||
return res
|
||||
|
||||
|
||||
class TARGETSBuilder(object):
|
||||
def __init__(self, path, extra_argv):
|
||||
class TARGETSBuilder:
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
self.targets_file = open(path, 'wb')
|
||||
header = targets_cfg.rocksdb_target_header_template.format(
|
||||
extra_argv=extra_argv)
|
||||
self.targets_file.write(header.encode("utf-8"))
|
||||
self.targets_file = open(path, 'w')
|
||||
self.targets_file.write(targets_cfg.rocksdb_target_header)
|
||||
self.total_lib = 0
|
||||
self.total_bin = 0
|
||||
self.total_test = 0
|
||||
@ -40,74 +31,37 @@ class TARGETSBuilder(object):
|
||||
def __del__(self):
|
||||
self.targets_file.close()
|
||||
|
||||
def add_library(self, name, srcs, deps=None, headers=None,
|
||||
extra_external_deps="", link_whole=False,
|
||||
external_dependencies=None, extra_test_libs=False):
|
||||
if headers is not None:
|
||||
headers = "[" + pretty_list(headers) + "]"
|
||||
def add_library(self, name, srcs, deps=None, headers=None):
|
||||
headers_attr_prefix = ""
|
||||
if headers is None:
|
||||
headers_attr_prefix = "auto_"
|
||||
headers = "AutoHeaders.RECURSIVE_GLOB"
|
||||
self.targets_file.write(targets_cfg.library_template.format(
|
||||
name=name,
|
||||
srcs=pretty_list(srcs),
|
||||
headers_attr_prefix=headers_attr_prefix,
|
||||
headers=headers,
|
||||
deps=pretty_list(deps),
|
||||
extra_external_deps=extra_external_deps,
|
||||
link_whole=link_whole,
|
||||
external_dependencies=pretty_list(external_dependencies),
|
||||
extra_test_libs=extra_test_libs
|
||||
).encode("utf-8"))
|
||||
deps=pretty_list(deps)))
|
||||
self.total_lib = self.total_lib + 1
|
||||
|
||||
def add_rocksdb_library(self, name, srcs, headers=None,
|
||||
external_dependencies=None):
|
||||
if headers is not None:
|
||||
headers = "[" + pretty_list(headers) + "]"
|
||||
self.targets_file.write(targets_cfg.rocksdb_library_template.format(
|
||||
name=name,
|
||||
srcs=pretty_list(srcs),
|
||||
headers=headers,
|
||||
external_dependencies=pretty_list(external_dependencies)
|
||||
).encode("utf-8")
|
||||
)
|
||||
self.total_lib = self.total_lib + 1
|
||||
|
||||
def add_binary(self, name, srcs, deps=None, extra_preprocessor_flags=None,extra_bench_libs=False):
|
||||
self.targets_file.write(targets_cfg.binary_template.format(
|
||||
name=name,
|
||||
srcs=pretty_list(srcs),
|
||||
deps=pretty_list(deps),
|
||||
extra_preprocessor_flags=pretty_list(extra_preprocessor_flags),
|
||||
extra_bench_libs=extra_bench_libs,
|
||||
).encode("utf-8"))
|
||||
def add_binary(self, name, srcs, deps=None):
|
||||
self.targets_file.write(targets_cfg.binary_template % (
|
||||
name,
|
||||
pretty_list(srcs),
|
||||
pretty_list(deps)))
|
||||
self.total_bin = self.total_bin + 1
|
||||
|
||||
def add_c_test(self):
|
||||
self.targets_file.write(b"""
|
||||
add_c_test_wrapper()
|
||||
""")
|
||||
def register_test(self, test_name, src, is_parallel):
|
||||
exec_mode = "serial"
|
||||
if is_parallel:
|
||||
exec_mode = "parallel"
|
||||
self.tests_cfg += targets_cfg.test_cfg_template % (
|
||||
test_name,
|
||||
str(src),
|
||||
str(exec_mode))
|
||||
|
||||
def add_test_header(self):
|
||||
self.targets_file.write(b"""
|
||||
# Generate a test rule for each entry in ROCKS_TESTS
|
||||
# Do not build the tests in opt mode, since SyncPoint and other test code
|
||||
# will not be included.
|
||||
""")
|
||||
|
||||
def add_fancy_bench_config(self, name, bench_config, slow, expected_runtime, sl_iterations, regression_threshold):
|
||||
self.targets_file.write(targets_cfg.fancy_bench_template.format(
|
||||
name=name,
|
||||
bench_config=pprint.pformat(bench_config),
|
||||
slow=slow,
|
||||
expected_runtime=expected_runtime,
|
||||
sl_iterations=sl_iterations,
|
||||
regression_threshold=regression_threshold
|
||||
).encode("utf-8"))
|
||||
|
||||
def register_test(self,
|
||||
test_name,
|
||||
src,
|
||||
deps,
|
||||
extra_compiler_flags):
|
||||
|
||||
self.targets_file.write(targets_cfg.unittests_template.format(test_name=test_name,test_cc=str(src),deps=deps,
|
||||
extra_compiler_flags=extra_compiler_flags).encode("utf-8"))
|
||||
self.total_test = self.total_test + 1
|
||||
|
||||
def flush_tests(self):
|
||||
self.targets_file.write(targets_cfg.unittests_template % self.tests_cfg)
|
||||
self.tests_cfg = ""
|
||||
|
@ -3,44 +3,134 @@ from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
rocksdb_target_header = """load("@fbcode_macros//build_defs:auto_headers.bzl", "AutoHeaders")
|
||||
load("@fbcode_macros//build_defs:cpp_library.bzl", "cpp_library")
|
||||
load(":defs.bzl", "test_binary")
|
||||
|
||||
rocksdb_target_header_template = \
|
||||
"""# This file \100generated by:
|
||||
#$ python3 buckifier/buckify_rocksdb.py{extra_argv}
|
||||
# --> DO NOT EDIT MANUALLY <--
|
||||
# This file is a Facebook-specific integration for buck builds, so can
|
||||
# only be validated by Facebook employees.
|
||||
#
|
||||
# @noautodeps @nocodemods
|
||||
load("//rocks/buckifier:defs.bzl", "cpp_library_wrapper","rocks_cpp_library_wrapper","cpp_binary_wrapper","cpp_unittest_wrapper","fancy_bench_wrapper","add_c_test_wrapper")
|
||||
REPO_PATH = package_name() + "/"
|
||||
|
||||
ROCKSDB_COMPILER_FLAGS = [
|
||||
"-fno-builtin-memcmp",
|
||||
"-DROCKSDB_PLATFORM_POSIX",
|
||||
"-DROCKSDB_LIB_IO_POSIX",
|
||||
"-DROCKSDB_FALLOCATE_PRESENT",
|
||||
"-DROCKSDB_MALLOC_USABLE_SIZE",
|
||||
"-DROCKSDB_RANGESYNC_PRESENT",
|
||||
"-DROCKSDB_SCHED_GETCPU_PRESENT",
|
||||
"-DROCKSDB_SUPPORT_THREAD_LOCAL",
|
||||
"-DOS_LINUX",
|
||||
# Flags to enable libs we include
|
||||
"-DSNAPPY",
|
||||
"-DZLIB",
|
||||
"-DBZIP2",
|
||||
"-DLZ4",
|
||||
"-DZSTD",
|
||||
"-DZSTD_STATIC_LINKING_ONLY",
|
||||
"-DGFLAGS=gflags",
|
||||
"-DNUMA",
|
||||
"-DTBB",
|
||||
# Needed to compile in fbcode
|
||||
"-Wno-expansion-to-defined",
|
||||
# Added missing flags from output of build_detect_platform
|
||||
"-DROCKSDB_PTHREAD_ADAPTIVE_MUTEX",
|
||||
"-DROCKSDB_BACKTRACE",
|
||||
"-Wnarrowing",
|
||||
]
|
||||
|
||||
ROCKSDB_EXTERNAL_DEPS = [
|
||||
("bzip2", None, "bz2"),
|
||||
("snappy", None, "snappy"),
|
||||
("zlib", None, "z"),
|
||||
("gflags", None, "gflags"),
|
||||
("lz4", None, "lz4"),
|
||||
("zstd", None),
|
||||
("tbb", None),
|
||||
("numa", None, "numa"),
|
||||
("googletest", None, "gtest"),
|
||||
]
|
||||
|
||||
ROCKSDB_PREPROCESSOR_FLAGS = [
|
||||
# Directories with files for #include
|
||||
"-I" + REPO_PATH + "include/",
|
||||
"-I" + REPO_PATH,
|
||||
]
|
||||
|
||||
ROCKSDB_ARCH_PREPROCESSOR_FLAGS = {
|
||||
"x86_64": [
|
||||
"-DHAVE_SSE42",
|
||||
"-DHAVE_PCLMUL",
|
||||
],
|
||||
}
|
||||
|
||||
build_mode = read_config("fbcode", "build_mode")
|
||||
|
||||
is_opt_mode = build_mode.startswith("opt")
|
||||
|
||||
# -DNDEBUG is added by default in opt mode in fbcode. But adding it twice
|
||||
# doesn't harm and avoid forgetting to add it.
|
||||
ROCKSDB_COMPILER_FLAGS += (["-DNDEBUG"] if is_opt_mode else [])
|
||||
|
||||
sanitizer = read_config("fbcode", "sanitizer")
|
||||
|
||||
# Do not enable jemalloc if sanitizer presents. RocksDB will further detect
|
||||
# whether the binary is linked with jemalloc at runtime.
|
||||
ROCKSDB_COMPILER_FLAGS += (["-DROCKSDB_JEMALLOC"] if sanitizer == "" else [])
|
||||
|
||||
ROCKSDB_EXTERNAL_DEPS += ([("jemalloc", None, "headers")] if sanitizer == "" else [])
|
||||
"""
|
||||
|
||||
|
||||
library_template = """
|
||||
cpp_library_wrapper(name="{name}", srcs=[{srcs}], deps=[{deps}], headers={headers}, link_whole={link_whole}, extra_test_libs={extra_test_libs})
|
||||
cpp_library(
|
||||
name = "{name}",
|
||||
srcs = [{srcs}],
|
||||
{headers_attr_prefix}headers = {headers},
|
||||
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
|
||||
compiler_flags = ROCKSDB_COMPILER_FLAGS,
|
||||
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
|
||||
deps = [{deps}],
|
||||
external_deps = ROCKSDB_EXTERNAL_DEPS,
|
||||
)
|
||||
"""
|
||||
|
||||
rocksdb_library_template = """
|
||||
rocks_cpp_library_wrapper(name="{name}", srcs=[{srcs}], headers={headers})
|
||||
|
||||
"""
|
||||
|
||||
|
||||
|
||||
binary_template = """
|
||||
cpp_binary_wrapper(name="{name}", srcs=[{srcs}], deps=[{deps}], extra_preprocessor_flags=[{extra_preprocessor_flags}], extra_bench_libs={extra_bench_libs})
|
||||
cpp_binary(
|
||||
name = "%s",
|
||||
srcs = [%s],
|
||||
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
|
||||
compiler_flags = ROCKSDB_COMPILER_FLAGS,
|
||||
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
|
||||
deps = [%s],
|
||||
external_deps = ROCKSDB_EXTERNAL_DEPS,
|
||||
)
|
||||
"""
|
||||
|
||||
test_cfg_template = """ [
|
||||
"%s",
|
||||
"%s",
|
||||
"%s",
|
||||
],
|
||||
"""
|
||||
|
||||
unittests_template = """
|
||||
cpp_unittest_wrapper(name="{test_name}",
|
||||
srcs=["{test_cc}"],
|
||||
deps={deps},
|
||||
extra_compiler_flags={extra_compiler_flags})
|
||||
|
||||
"""
|
||||
|
||||
fancy_bench_template = """
|
||||
fancy_bench_wrapper(suite_name="{name}", binary_to_bench_to_metric_list_map={bench_config}, slow={slow}, expected_runtime={expected_runtime}, sl_iterations={sl_iterations}, regression_threshold={regression_threshold})
|
||||
# [test_name, test_src, test_type]
|
||||
ROCKS_TESTS = [
|
||||
%s]
|
||||
|
||||
# Generate a test rule for each entry in ROCKS_TESTS
|
||||
# Do not build the tests in opt mode, since SyncPoint and other test code
|
||||
# will not be included.
|
||||
[
|
||||
test_binary(
|
||||
parallelism = parallelism,
|
||||
rocksdb_arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
|
||||
rocksdb_compiler_flags = ROCKSDB_COMPILER_FLAGS,
|
||||
rocksdb_external_deps = ROCKSDB_EXTERNAL_DEPS,
|
||||
rocksdb_preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
|
||||
test_cc = test_cc,
|
||||
test_name = test_name,
|
||||
)
|
||||
for test_name, test_cc, parallelism in ROCKS_TESTS
|
||||
if not is_opt_mode
|
||||
]
|
||||
"""
|
||||
|
@ -6,16 +6,11 @@ from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
try:
|
||||
from builtins import object
|
||||
except ImportError:
|
||||
from __builtin__ import object
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
|
||||
class ColorString(object):
|
||||
class ColorString:
|
||||
""" Generate colorful strings on terminal """
|
||||
HEADER = '\033[95m'
|
||||
BLUE = '\033[94m'
|
||||
@ -26,13 +21,7 @@ class ColorString(object):
|
||||
|
||||
@staticmethod
|
||||
def _make_color_str(text, color):
|
||||
# In Python2, default encoding for unicode string is ASCII
|
||||
if sys.version_info.major <= 2:
|
||||
return "".join(
|
||||
[color, text.encode('utf-8'), ColorString.ENDC])
|
||||
# From Python3, default encoding for unicode string is UTF-8
|
||||
return "".join(
|
||||
[color, text, ColorString.ENDC])
|
||||
return "".join([color, text.encode('utf-8'), ColorString.ENDC])
|
||||
|
||||
@staticmethod
|
||||
def ok(text):
|
||||
|
377
build_tools/RocksDBCommonHelper.php
Normal file
377
build_tools/RocksDBCommonHelper.php
Normal file
@ -0,0 +1,377 @@
|
||||
<?php
|
||||
// Copyright 2004-present Facebook. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
// Name of the environment variables which need to be set by the entity which
|
||||
// triggers continuous runs so that code at the end of the file gets executed
|
||||
// and Sandcastle run starts.
|
||||
const ENV_POST_RECEIVE_HOOK = "POST_RECEIVE_HOOK";
|
||||
const ENV_HTTPS_APP_VALUE = "HTTPS_APP_VALUE";
|
||||
const ENV_HTTPS_TOKEN_VALUE = "HTTPS_TOKEN_VALUE";
|
||||
|
||||
const PRIMARY_TOKEN_FILE = '/home/krad/.sandcastle';
|
||||
const CONT_RUN_ALIAS = "leveldb";
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
/* Run tests in sandcastle */
|
||||
function postURL($diffID, $url) {
|
||||
assert(strlen($diffID) > 0);
|
||||
assert(is_numeric($diffID));
|
||||
assert(strlen($url) > 0);
|
||||
|
||||
$cmd_args = array(
|
||||
'diff_id' => (int)$diffID,
|
||||
'name' => sprintf(
|
||||
'click here for sandcastle tests for D%d',
|
||||
(int)$diffID
|
||||
),
|
||||
'link' => $url
|
||||
);
|
||||
$cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
|
||||
. ' | arc call-conduit differential.updateunitresults';
|
||||
|
||||
shell_exec($cmd);
|
||||
}
|
||||
|
||||
function buildUpdateTestStatusCmd($diffID, $test, $status) {
|
||||
assert(strlen($diffID) > 0);
|
||||
assert(is_numeric($diffID));
|
||||
assert(strlen($test) > 0);
|
||||
assert(strlen($status) > 0);
|
||||
|
||||
$cmd_args = array(
|
||||
'diff_id' => (int)$diffID,
|
||||
'name' => $test,
|
||||
'result' => $status
|
||||
);
|
||||
|
||||
$cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
|
||||
. ' | arc call-conduit differential.updateunitresults';
|
||||
|
||||
return $cmd;
|
||||
}
|
||||
|
||||
function updateTestStatus($diffID, $test) {
|
||||
assert(strlen($diffID) > 0);
|
||||
assert(is_numeric($diffID));
|
||||
assert(strlen($test) > 0);
|
||||
|
||||
shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting"));
|
||||
}
|
||||
|
||||
function getSteps($applyDiff, $diffID, $username, $test) {
|
||||
assert(strlen($username) > 0);
|
||||
assert(strlen($test) > 0);
|
||||
|
||||
if ($applyDiff) {
|
||||
assert(strlen($diffID) > 0);
|
||||
assert(is_numeric($diffID));
|
||||
|
||||
$arcrc_content = (PHP_OS == "Darwin" ?
|
||||
exec("cat ~/.arcrc | gzip -f | base64") :
|
||||
exec("cat ~/.arcrc | gzip -f | base64 -w0"));
|
||||
assert(strlen($arcrc_content) > 0);
|
||||
|
||||
// Sandcastle machines don't have arc setup. We copy the user certificate
|
||||
// and authenticate using that in Sandcastle.
|
||||
$setup = array(
|
||||
"name" => "Setup arcrc",
|
||||
"shell" => "echo " . escapeshellarg($arcrc_content) . " | base64 --decode"
|
||||
. " | gzip -d > ~/.arcrc",
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
// arc demands certain permission on its config.
|
||||
// also fix the sticky bit issue in sandcastle
|
||||
$fix_permission = array(
|
||||
"name" => "Fix environment",
|
||||
"shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm",
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
// Construct the steps in the order of execution.
|
||||
$steps[] = $setup;
|
||||
$steps[] = $fix_permission;
|
||||
}
|
||||
|
||||
// fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise
|
||||
// Git thinks it is an uncommitted change.
|
||||
$fix_git_ignore = array(
|
||||
"name" => "Fix git ignore",
|
||||
"shell" => "echo fbcode >> .git/info/exclude",
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
// This fixes "FATAL: ThreadSanitizer can not mmap the shadow memory"
|
||||
// Source:
|
||||
// https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual#FAQ
|
||||
$fix_kernel_issue = array(
|
||||
"name" => "Fix kernel issue with tsan",
|
||||
"shell" => "echo 2 >/proc/sys/kernel/randomize_va_space",
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
$steps[] = $fix_git_ignore;
|
||||
$steps[] = $fix_kernel_issue;
|
||||
|
||||
// This will be the command used to execute particular type of tests.
|
||||
$cmd = "";
|
||||
|
||||
if ($applyDiff) {
|
||||
// Patch the code (keep your fingures crossed).
|
||||
$patch = array(
|
||||
"name" => "Patch " . $diffID,
|
||||
"shell" => "arc --arcrc-file ~/.arcrc "
|
||||
. "patch --nocommit --diff " . escapeshellarg($diffID),
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
$steps[] = $patch;
|
||||
|
||||
updateTestStatus($diffID, $test);
|
||||
$cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; ";
|
||||
}
|
||||
|
||||
// Run the actual command.
|
||||
$cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " .
|
||||
escapeshellarg($test) . "; exit_code=$?; ";
|
||||
|
||||
if ($applyDiff) {
|
||||
$cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&"
|
||||
. buildUpdateTestStatusCmd($diffID, $test, "pass") . ")"
|
||||
. "||" . buildUpdateTestStatusCmd($diffID, $test, "fail")
|
||||
. "; ";
|
||||
}
|
||||
|
||||
// shell command to sort the tests based on exit code and print
|
||||
// the output of the log files.
|
||||
$cat_sorted_logs = "
|
||||
while read code log_file;
|
||||
do echo \"################ cat \$log_file [exit_code : \$code] ################\";
|
||||
cat \$log_file;
|
||||
done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')";
|
||||
|
||||
// Shell command to cat all log files
|
||||
$cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done";
|
||||
|
||||
// If LOG file exist use it to cat log files sorted by exit code, otherwise
|
||||
// cat everything
|
||||
$logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi";
|
||||
|
||||
$cmd = $cmd . " cat /tmp/precommit-check.log"
|
||||
. "; shopt -s extglob; {$logs_cmd}"
|
||||
. "; shopt -u extglob; [[ \$exit_code -eq 0 ]]";
|
||||
assert(strlen($cmd) > 0);
|
||||
|
||||
$run_test = array(
|
||||
"name" => "Run " . $test,
|
||||
"shell" => $cmd,
|
||||
"user" => "root",
|
||||
"parser" => "python build_tools/error_filter.py " . escapeshellarg($test),
|
||||
);
|
||||
|
||||
$steps[] = $run_test;
|
||||
|
||||
if ($applyDiff) {
|
||||
// Clean up the user arc config we are using.
|
||||
$cleanup = array(
|
||||
"name" => "Arc cleanup",
|
||||
"shell" => "rm -f ~/.arcrc",
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
$steps[] = $cleanup;
|
||||
}
|
||||
|
||||
assert(count($steps) > 0);
|
||||
return $steps;
|
||||
}
|
||||
|
||||
function getSandcastleConfig() {
|
||||
$sandcastle_config = array();
|
||||
|
||||
$cwd = getcwd();
|
||||
$cwd_token_file = "{$cwd}/.sandcastle";
|
||||
// This is a case when we're executed from a continuous run. Fetch the values
|
||||
// from the environment.
|
||||
if (getenv(ENV_POST_RECEIVE_HOOK)) {
|
||||
$sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE);
|
||||
$sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE);
|
||||
} else {
|
||||
// This is a typical `[p]arc diff` case. Fetch the values from the specific
|
||||
// configuration files.
|
||||
for ($i = 0; $i < 50; $i++) {
|
||||
if (file_exists(PRIMARY_TOKEN_FILE) ||
|
||||
file_exists($cwd_token_file)) {
|
||||
break;
|
||||
}
|
||||
// If we failed to fetch the tokens, sleep for 0.2 second and try again
|
||||
usleep(200000);
|
||||
}
|
||||
assert(file_exists(PRIMARY_TOKEN_FILE) ||
|
||||
file_exists($cwd_token_file));
|
||||
|
||||
// Try the primary location first, followed by a secondary.
|
||||
if (file_exists(PRIMARY_TOKEN_FILE)) {
|
||||
$cmd = 'cat ' . PRIMARY_TOKEN_FILE;
|
||||
} else {
|
||||
$cmd = 'cat ' . escapeshellarg($cwd_token_file);
|
||||
}
|
||||
|
||||
assert(strlen($cmd) > 0);
|
||||
$sandcastle_config = explode(':', rtrim(shell_exec($cmd)));
|
||||
}
|
||||
|
||||
// In this case be very explicit about the implications.
|
||||
if (count($sandcastle_config) != 2) {
|
||||
echo "Sandcastle configuration files don't contain valid information " .
|
||||
"or the necessary environment variables aren't defined. Unable " .
|
||||
"to validate the code changes.";
|
||||
exit(1);
|
||||
}
|
||||
|
||||
assert(strlen($sandcastle_config[0]) > 0);
|
||||
assert(strlen($sandcastle_config[1]) > 0);
|
||||
assert(count($sandcastle_config) > 0);
|
||||
|
||||
return $sandcastle_config;
|
||||
}
|
||||
|
||||
// This function can be called either from `[p]arc diff` command or during
|
||||
// the Git post-receive hook.
|
||||
function startTestsInSandcastle($applyDiff, $workflow, $diffID) {
|
||||
// Default options don't terminate on failure, but that's what we want. In
|
||||
// the current case we use assertions intentionally as "terminate on failure
|
||||
// invariants".
|
||||
assert_options(ASSERT_BAIL, true);
|
||||
|
||||
// In case of a diff we'll send notificatios to the author. Else it'll go to
|
||||
// the entire team because failures indicate that build quality has regressed.
|
||||
$username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS;
|
||||
assert(strlen($username) > 0);
|
||||
|
||||
if ($applyDiff) {
|
||||
assert($workflow);
|
||||
assert(strlen($diffID) > 0);
|
||||
assert(is_numeric($diffID));
|
||||
}
|
||||
|
||||
// List of tests we want to run in Sandcastle.
|
||||
$tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan",
|
||||
"asan", "lite_test", "valgrind", "release", "release_481",
|
||||
"clang_release", "clang_analyze", "code_cov",
|
||||
"java_build", "no_compression", "unity", "ubsan");
|
||||
|
||||
$send_email_template = array(
|
||||
'type' => 'email',
|
||||
'triggers' => array('fail'),
|
||||
'emails' => array($username . '@fb.com'),
|
||||
);
|
||||
|
||||
// Construct a job definition for each test and add it to the master plan.
|
||||
foreach ($tests as $test) {
|
||||
$stepName = "RocksDB diff " . $diffID . " test " . $test;
|
||||
|
||||
if (!$applyDiff) {
|
||||
$stepName = "RocksDB continuous integration test " . $test;
|
||||
}
|
||||
|
||||
$arg[] = array(
|
||||
"name" => $stepName,
|
||||
"report" => array($send_email_template),
|
||||
"steps" => getSteps($applyDiff, $diffID, $username, $test)
|
||||
);
|
||||
}
|
||||
|
||||
// We cannot submit the parallel execution master plan to Sandcastle and
|
||||
// need supply the job plan as a determinator. So we construct a small job
|
||||
// that will spit out the master job plan which Sandcastle will parse and
|
||||
// execute. Why compress the job definitions? Otherwise we run over the max
|
||||
// string size.
|
||||
$cmd = "echo " . base64_encode(json_encode($arg))
|
||||
. (PHP_OS == "Darwin" ?
|
||||
" | gzip -f | base64" :
|
||||
" | gzip -f | base64 -w0");
|
||||
assert(strlen($cmd) > 0);
|
||||
|
||||
$arg_encoded = shell_exec($cmd);
|
||||
assert(strlen($arg_encoded) > 0);
|
||||
|
||||
$runName = "Run diff " . $diffID . "for user " . $username;
|
||||
|
||||
if (!$applyDiff) {
|
||||
$runName = "RocksDB continuous integration build and test run";
|
||||
}
|
||||
|
||||
$command = array(
|
||||
"name" => $runName,
|
||||
"steps" => array()
|
||||
);
|
||||
|
||||
$command["steps"][] = array(
|
||||
"name" => "Generate determinator",
|
||||
"shell" => "echo " . $arg_encoded . " | base64 --decode | gzip -d"
|
||||
. " | base64 --decode",
|
||||
"determinator" => true,
|
||||
"user" => "root"
|
||||
);
|
||||
|
||||
// Submit to Sandcastle.
|
||||
$url = 'https://interngraph.intern.facebook.com/sandcastle/create';
|
||||
|
||||
$job = array(
|
||||
'command' => 'SandcastleUniversalCommand',
|
||||
'args' => $command,
|
||||
'capabilities' => array(
|
||||
'vcs' => 'rocksdb-int-git',
|
||||
'type' => 'lego',
|
||||
),
|
||||
'hash' => 'origin/master',
|
||||
'user' => $username,
|
||||
'alias' => 'rocksdb-precommit',
|
||||
'tags' => array('rocksdb'),
|
||||
'description' => 'Rocksdb precommit job',
|
||||
);
|
||||
|
||||
// Fetch the configuration necessary to submit a successful HTTPS request.
|
||||
$sandcastle_config = getSandcastleConfig();
|
||||
|
||||
$app = $sandcastle_config[0];
|
||||
$token = $sandcastle_config[1];
|
||||
|
||||
$cmd = 'curl -s -k '
|
||||
. ' -F app=' . escapeshellarg($app)
|
||||
. ' -F token=' . escapeshellarg($token)
|
||||
. ' -F job=' . escapeshellarg(json_encode($job))
|
||||
.' ' . escapeshellarg($url);
|
||||
|
||||
$output = shell_exec($cmd);
|
||||
assert(strlen($output) > 0);
|
||||
|
||||
// Extract Sandcastle URL from the response.
|
||||
preg_match('/url": "(.+)"/', $output, $sandcastle_url);
|
||||
|
||||
assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request.");
|
||||
assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL.");
|
||||
|
||||
if ($applyDiff) {
|
||||
echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n";
|
||||
// Ask Phabricator to display it on the diff UI.
|
||||
postURL($diffID, $sandcastle_url[1]);
|
||||
} else {
|
||||
echo "Continuous integration started Sandcastle tests. You can look at ";
|
||||
echo "the progress at:\n" . $sandcastle_url[1] . "\n";
|
||||
}
|
||||
}
|
||||
|
||||
// Continuous run cript will set the environment variable and based on that
|
||||
// we'll trigger the execution of tests in Sandcastle. In that case we don't
|
||||
// need to apply any diffs and there's no associated workflow either.
|
||||
if (getenv(ENV_POST_RECEIVE_HOOK)) {
|
||||
startTestsInSandcastle(
|
||||
false /* $applyDiff */,
|
||||
NULL /* $workflow */,
|
||||
NULL /* $diffID */);
|
||||
}
|
@ -9,7 +9,6 @@
|
||||
# PLATFORM_LDFLAGS Linker flags
|
||||
# JAVA_LDFLAGS Linker flags for RocksDBJava
|
||||
# JAVA_STATIC_LDFLAGS Linker flags for RocksDBJava static build
|
||||
# JAVAC_ARGS Arguments for javac
|
||||
# PLATFORM_SHARED_EXT Extension for shared libraries
|
||||
# PLATFORM_SHARED_LDFLAGS Flags for building shared library
|
||||
# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
|
||||
@ -28,7 +27,6 @@
|
||||
# -DZSTD if the ZSTD library is present
|
||||
# -DNUMA if the NUMA library is present
|
||||
# -DTBB if the TBB library is present
|
||||
# -DMEMKIND if the memkind library is present
|
||||
#
|
||||
# Using gflags in rocksdb:
|
||||
# Our project depends on gflags, which requires users to take some extra steps
|
||||
@ -45,30 +43,23 @@ if test -z "$OUTPUT"; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# we depend on C++17, but should be compatible with newer standards
|
||||
if [ "$ROCKSDB_CXX_STANDARD" ]; then
|
||||
PLATFORM_CXXFLAGS="-std=$ROCKSDB_CXX_STANDARD"
|
||||
else
|
||||
PLATFORM_CXXFLAGS="-std=c++17"
|
||||
fi
|
||||
|
||||
# we depend on C++11
|
||||
PLATFORM_CXXFLAGS="-std=c++11"
|
||||
# we currently depend on POSIX platform
|
||||
COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
|
||||
|
||||
# Default to fbcode gcc on internal fb machines
|
||||
if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
|
||||
FBCODE_BUILD="true"
|
||||
# If we're compiling with TSAN or shared lib, we need pic build
|
||||
# If we're compiling with TSAN we need pic build
|
||||
PIC_BUILD=$COMPILE_WITH_TSAN
|
||||
if [ "$LIB_MODE" == "shared" ]; then
|
||||
PIC_BUILD=1
|
||||
fi
|
||||
if [ -n "$ROCKSDB_FBCODE_BUILD_WITH_PLATFORM010" ]; then
|
||||
source "$PWD/build_tools/fbcode_config_platform010.sh"
|
||||
elif [ -n "$ROCKSDB_FBCODE_BUILD_WITH_PLATFORM009" ]; then
|
||||
source "$PWD/build_tools/fbcode_config_platform009.sh"
|
||||
if [ -n "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
|
||||
# we need this to build with MySQL. Don't use for other purposes.
|
||||
source "$PWD/build_tools/fbcode_config4.8.1.sh"
|
||||
elif [ -n "$ROCKSDB_FBCODE_BUILD_WITH_PLATFORM007" ]; then
|
||||
source "$PWD/build_tools/fbcode_config_platform007.sh"
|
||||
else
|
||||
source "$PWD/build_tools/fbcode_config_platform009.sh"
|
||||
source "$PWD/build_tools/fbcode_config.sh"
|
||||
fi
|
||||
fi
|
||||
|
||||
@ -96,16 +87,6 @@ if test -z "$CXX"; then
|
||||
fi
|
||||
fi
|
||||
|
||||
if test -z "$AR"; then
|
||||
if [ -x "$(command -v gcc-ar)" ]; then
|
||||
AR=gcc-ar
|
||||
elif [ -x "$(command -v llvm-ar)" ]; then
|
||||
AR=llvm-ar
|
||||
else
|
||||
AR=ar
|
||||
fi
|
||||
fi
|
||||
|
||||
# Detect OS
|
||||
if test -z "$TARGET_OS"; then
|
||||
TARGET_OS=`uname -s`
|
||||
@ -168,25 +149,7 @@ case "$TARGET_OS" in
|
||||
else
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -latomic"
|
||||
fi
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt -ldl"
|
||||
if test -z "$ROCKSDB_USE_IO_URING"; then
|
||||
ROCKSDB_USE_IO_URING=1
|
||||
fi
|
||||
if test "$ROCKSDB_USE_IO_URING" -ne 0; then
|
||||
# check for liburing
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -luring -o test.o 2>/dev/null <<EOF
|
||||
#include <liburing.h>
|
||||
int main() {
|
||||
struct io_uring ring;
|
||||
io_uring_queue_init(1, &ring, 0);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -luring"
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_IOURING_PRESENT"
|
||||
fi
|
||||
fi
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
|
||||
# PORT_FILES=port/linux/linux_specific.cc
|
||||
;;
|
||||
SunOS)
|
||||
@ -209,17 +172,6 @@ EOF
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
|
||||
# PORT_FILES=port/freebsd/freebsd_specific.cc
|
||||
;;
|
||||
GNU/kFreeBSD)
|
||||
PLATFORM=OS_GNU_KFREEBSD
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DOS_GNU_KFREEBSD"
|
||||
if [ -z "$USE_CLANG" ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
|
||||
else
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -latomic"
|
||||
fi
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
|
||||
# PORT_FILES=port/gnu_kfreebsd/gnu_kfreebsd_specific.cc
|
||||
;;
|
||||
NetBSD)
|
||||
PLATFORM=OS_NETBSD
|
||||
COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_NETBSD"
|
||||
@ -269,20 +221,15 @@ esac
|
||||
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS ${CXXFLAGS}"
|
||||
JAVA_LDFLAGS="$PLATFORM_LDFLAGS"
|
||||
JAVA_STATIC_LDFLAGS="$PLATFORM_LDFLAGS"
|
||||
JAVAC_ARGS="-source 8"
|
||||
|
||||
if [ "$CROSS_COMPILE" = "true" -o "$FBCODE_BUILD" = "true" ]; then
|
||||
# Cross-compiling; do not try any compilation tests.
|
||||
# Also don't need any compilation tests if compiling on fbcode
|
||||
if [ "$FBCODE_BUILD" = "true" ]; then
|
||||
# Enable backtrace on fbcode since the necessary libraries are present
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
|
||||
fi
|
||||
true
|
||||
else
|
||||
if ! test $ROCKSDB_DISABLE_FALLOCATE; then
|
||||
# Test whether fallocate is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <fcntl.h>
|
||||
#include <linux/falloc.h>
|
||||
int main() {
|
||||
@ -298,7 +245,7 @@ EOF
|
||||
if ! test $ROCKSDB_DISABLE_SNAPPY; then
|
||||
# Test whether Snappy library is installed
|
||||
# http://code.google.com/p/snappy/
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <snappy.h>
|
||||
int main() {}
|
||||
EOF
|
||||
@ -313,38 +260,30 @@ EOF
|
||||
# Test whether gflags library is installed
|
||||
# http://gflags.github.io/gflags/
|
||||
# check if the namespace is gflags
|
||||
if $CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null << EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
|
||||
#include <gflags/gflags.h>
|
||||
using namespace GFLAGS_NAMESPACE;
|
||||
int main() {}
|
||||
EOF
|
||||
then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=1"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
|
||||
# check if namespace is gflags
|
||||
elif $CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null << EOF
|
||||
#include <gflags/gflags.h>
|
||||
using namespace gflags;
|
||||
int main() {}
|
||||
EOF
|
||||
then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=1 -DGFLAGS_NAMESPACE=gflags"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
|
||||
# check if namespace is google
|
||||
elif $CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null << EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=1"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
|
||||
else
|
||||
# check if namespace is google
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
|
||||
#include <gflags/gflags.h>
|
||||
using namespace google;
|
||||
int main() {}
|
||||
EOF
|
||||
then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=1 -DGFLAGS_NAMESPACE=google"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=google"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZLIB; then
|
||||
# Test whether zlib library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <zlib.h>
|
||||
int main() {}
|
||||
EOF
|
||||
@ -357,7 +296,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_BZIP; then
|
||||
# Test whether bzip library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <bzlib.h>
|
||||
int main() {}
|
||||
EOF
|
||||
@ -370,7 +309,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_LZ4; then
|
||||
# Test whether lz4 library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <lz4.h>
|
||||
#include <lz4hc.h>
|
||||
int main() {}
|
||||
@ -384,7 +323,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZSTD; then
|
||||
# Test whether zstd library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <zstd.h>
|
||||
int main() {}
|
||||
EOF
|
||||
@ -397,7 +336,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_NUMA; then
|
||||
# Test whether numa is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o -lnuma 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null -lnuma 2>/dev/null <<EOF
|
||||
#include <numa.h>
|
||||
#include <numaif.h>
|
||||
int main() {}
|
||||
@ -411,7 +350,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_TBB; then
|
||||
# Test whether tbb is available
|
||||
$CXX $PLATFORM_CXXFLAGS $LDFLAGS -x c++ - -o test.o -ltbb 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS $LDFLAGS -x c++ - -o /dev/null -ltbb 2>/dev/null <<EOF
|
||||
#include <tbb/tbb.h>
|
||||
int main() {}
|
||||
EOF
|
||||
@ -424,7 +363,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_JEMALLOC; then
|
||||
# Test whether jemalloc is available
|
||||
if echo 'int main() {}' | $CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o -ljemalloc \
|
||||
if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \
|
||||
2>/dev/null; then
|
||||
# This will enable some preprocessor identifiers in the Makefile
|
||||
JEMALLOC=1
|
||||
@ -445,7 +384,7 @@ EOF
|
||||
fi
|
||||
if ! test $JEMALLOC && ! test $ROCKSDB_DISABLE_TCMALLOC; then
|
||||
# jemalloc is not available. Let's try tcmalloc
|
||||
if echo 'int main() {}' | $CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o \
|
||||
if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
|
||||
-ltcmalloc 2>/dev/null; then
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltcmalloc"
|
||||
JAVA_LDFLAGS="$JAVA_LDFLAGS -ltcmalloc"
|
||||
@ -454,11 +393,10 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_MALLOC_USABLE_SIZE; then
|
||||
# Test whether malloc_usable_size is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <malloc.h>
|
||||
int main() {
|
||||
size_t res = malloc_usable_size(0);
|
||||
(void)res;
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
@ -467,29 +405,12 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_MEMKIND; then
|
||||
# Test whether memkind library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS $LDFLAGS -x c++ - -o test.o -lmemkind 2>/dev/null <<EOF
|
||||
#include <memkind.h>
|
||||
int main() {
|
||||
memkind_malloc(MEMKIND_DAX_KMEM, 1024);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DMEMKIND"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lmemkind"
|
||||
JAVA_LDFLAGS="$JAVA_LDFLAGS -lmemkind"
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_PTHREAD_MUTEX_ADAPTIVE_NP; then
|
||||
# Test whether PTHREAD_MUTEX_ADAPTIVE_NP mutex type is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <pthread.h>
|
||||
int main() {
|
||||
int x = PTHREAD_MUTEX_ADAPTIVE_NP;
|
||||
(void)x;
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
@ -500,8 +421,8 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_BACKTRACE; then
|
||||
# Test whether backtrace is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <execinfo.h>
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <execinfo.h>>
|
||||
int main() {
|
||||
void* frames[1];
|
||||
backtrace_symbols(frames, backtrace(frames, 1));
|
||||
@ -512,7 +433,7 @@ EOF
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
|
||||
else
|
||||
# Test whether execinfo library is installed
|
||||
$CXX $PLATFORM_CXXFLAGS -lexecinfo -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -lexecinfo -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <execinfo.h>
|
||||
int main() {
|
||||
void* frames[1];
|
||||
@ -529,7 +450,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_PG; then
|
||||
# Test if -pg is supported
|
||||
$CXX $PLATFORM_CXXFLAGS -pg -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -pg -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
int main() {
|
||||
return 0;
|
||||
}
|
||||
@ -541,7 +462,7 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_SYNC_FILE_RANGE; then
|
||||
# Test whether sync_file_range is supported for compatibility with an old glibc
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <fcntl.h>
|
||||
int main() {
|
||||
int fd = open("/dev/null", 0);
|
||||
@ -555,11 +476,10 @@ EOF
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_SCHED_GETCPU; then
|
||||
# Test whether sched_getcpu is supported
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <sched.h>
|
||||
int main() {
|
||||
int cpuid = sched_getcpu();
|
||||
(void)cpuid;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
@ -567,23 +487,9 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_AUXV_GETAUXVAL; then
|
||||
# Test whether getauxval is supported
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <sys/auxv.h>
|
||||
int main() {
|
||||
uint64_t auxv = getauxval(AT_HWCAP);
|
||||
(void)auxv;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_AUXV_GETAUXVAL_PRESENT"
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ALIGNED_NEW; then
|
||||
# Test whether c++17 aligned-new is supported
|
||||
$CXX $PLATFORM_CXXFLAGS -faligned-new -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $PLATFORM_CXXFLAGS -faligned-new -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
struct alignas(1024) t {int a;};
|
||||
int main() {}
|
||||
EOF
|
||||
@ -591,23 +497,13 @@ EOF
|
||||
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS -faligned-new -DHAVE_ALIGNED_NEW"
|
||||
fi
|
||||
fi
|
||||
if ! test $ROCKSDB_DISABLE_BENCHMARK; then
|
||||
# Test whether google benchmark is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o /dev/null -lbenchmark -lpthread 2>/dev/null <<EOF
|
||||
#include <benchmark/benchmark.h>
|
||||
int main() {}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lbenchmark"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# TODO(tec): Fix -Wshorten-64-to-32 errors on FreeBSD and enable the warning.
|
||||
# -Wshorten-64-to-32 breaks compilation on FreeBSD aarch64 and i386
|
||||
if ! { [ "$TARGET_OS" = FreeBSD ] && [ "$TARGET_ARCHITECTURE" = arm64 -o "$TARGET_ARCHITECTURE" = i386 ]; }; then
|
||||
# -Wshorten-64-to-32 breaks compilation on FreeBSD i386
|
||||
if ! [ "$TARGET_OS" = FreeBSD -a "$TARGET_ARCHITECTURE" = i386 ]; then
|
||||
# Test whether -Wshorten-64-to-32 is available
|
||||
$CXX $PLATFORM_CXXFLAGS -x c++ - -o test.o -Wshorten-64-to-32 2>/dev/null <<EOF
|
||||
$CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
|
||||
int main() {}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
@ -615,119 +511,58 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "0$PORTABLE" -eq 0; then
|
||||
# shall we use HDFS?
|
||||
|
||||
if test "$USE_HDFS"; then
|
||||
if test -z "$JAVA_HOME"; then
|
||||
echo "JAVA_HOME has to be set for HDFS usage."
|
||||
exit 1
|
||||
fi
|
||||
HDFS_CCFLAGS="$HDFS_CCFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/linux -DUSE_HDFS -I$HADOOP_HOME/include"
|
||||
HDFS_LDFLAGS="$HDFS_LDFLAGS -lhdfs -L$JAVA_HOME/jre/lib/amd64 -L$HADOOP_HOME/lib/native"
|
||||
HDFS_LDFLAGS="$HDFS_LDFLAGS -L$JAVA_HOME/jre/lib/amd64/server -L$GLIBC_RUNTIME_PATH/lib"
|
||||
HDFS_LDFLAGS="$HDFS_LDFLAGS -ldl -lverify -ljava -ljvm"
|
||||
COMMON_FLAGS="$COMMON_FLAGS $HDFS_CCFLAGS"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS $HDFS_LDFLAGS"
|
||||
JAVA_LDFLAGS="$JAVA_LDFLAGS $HDFS_LDFLAGS"
|
||||
fi
|
||||
|
||||
if test -z "$PORTABLE"; then
|
||||
if test -n "`echo $TARGET_ARCHITECTURE | grep ^ppc64`"; then
|
||||
# Tune for this POWER processor, treating '+' models as base models
|
||||
POWER=`LD_SHOW_AUXV=1 /bin/true | grep AT_PLATFORM | grep -E -o power[0-9]+`
|
||||
COMMON_FLAGS="$COMMON_FLAGS -mcpu=$POWER -mtune=$POWER "
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep -e^arm -e^aarch64`"; then
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep ^s390x`"; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=z10 "
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep ^arm`"; then
|
||||
# TODO: Handle this with approprite options.
|
||||
COMMON_FLAGS="$COMMON_FLAGS"
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep ^aarch64`"; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS"
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep ^s390x`"; then
|
||||
if echo 'int main() {}' | $CXX $PLATFORM_CXXFLAGS -x c++ \
|
||||
-march=native - -o /dev/null 2>/dev/null; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=native "
|
||||
else
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=z196 "
|
||||
fi
|
||||
COMMON_FLAGS="$COMMON_FLAGS"
|
||||
elif test -n "`echo $TARGET_ARCHITECTURE | grep ^riscv64`"; then
|
||||
RISC_ISA=$(cat /proc/cpuinfo | grep isa | head -1 | cut --delimiter=: -f 2 | cut -b 2-)
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=${RISC_ISA}"
|
||||
elif [ "$TARGET_OS" == "IOS" ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS"
|
||||
elif [ "$TARGET_OS" == "AIX" ] || [ "$TARGET_OS" == "SunOS" ]; then
|
||||
# TODO: Not sure why we don't use -march=native on these OSes
|
||||
if test "$USE_SSE"; then
|
||||
TRY_SSE_ETC="1"
|
||||
fi
|
||||
else
|
||||
elif [ "$TARGET_OS" != "AIX" ] && [ "$TARGET_OS" != "SunOS" ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=native "
|
||||
elif test "$USE_SSE"; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -msse4.2 -mpclmul"
|
||||
fi
|
||||
else
|
||||
# PORTABLE=1
|
||||
if test "$USE_SSE"; then
|
||||
TRY_SSE_ETC="1"
|
||||
fi
|
||||
|
||||
if test -n "`echo $TARGET_ARCHITECTURE | grep ^s390x`"; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=z196 "
|
||||
fi
|
||||
|
||||
if test -n "`echo $TARGET_ARCHITECTURE | grep ^riscv64`"; then
|
||||
RISC_ISA=$(cat /proc/cpuinfo | grep isa | head -1 | cut --delimiter=: -f 2 | cut -b 2-)
|
||||
COMMON_FLAGS="$COMMON_FLAGS -march=${RISC_ISA}"
|
||||
fi
|
||||
|
||||
if [[ "${PLATFORM}" == "OS_MACOSX" ]]; then
|
||||
# For portability compile for macOS 10.12 (2016) or newer
|
||||
COMMON_FLAGS="$COMMON_FLAGS -mmacosx-version-min=10.12"
|
||||
PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -mmacosx-version-min=10.12"
|
||||
# -mmacosx-version-min must come first here.
|
||||
PLATFORM_SHARED_LDFLAGS="-mmacosx-version-min=10.12 $PLATFORM_SHARED_LDFLAGS"
|
||||
PLATFORM_CMAKE_FLAGS="-DCMAKE_OSX_DEPLOYMENT_TARGET=10.12"
|
||||
JAVA_STATIC_DEPS_COMMON_FLAGS="-mmacosx-version-min=10.12"
|
||||
JAVA_STATIC_DEPS_LDFLAGS="$JAVA_STATIC_DEPS_COMMON_FLAGS"
|
||||
JAVA_STATIC_DEPS_CCFLAGS="$JAVA_STATIC_DEPS_COMMON_FLAGS"
|
||||
JAVA_STATIC_DEPS_CXXFLAGS="$JAVA_STATIC_DEPS_COMMON_FLAGS"
|
||||
fi
|
||||
elif test "$USE_SSE"; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -msse4.2 -mpclmul"
|
||||
fi
|
||||
|
||||
if test -n "`echo $TARGET_ARCHITECTURE | grep ^ppc64`"; then
|
||||
# check for GNU libc on ppc64
|
||||
$CXX -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <gnu/libc-version.h>
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
printf("GNU libc version: %s\n", gnu_get_libc_version());
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" != 0 ]; then
|
||||
PPC_LIBC_IS_GNU=0
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "$TRY_SSE_ETC"; then
|
||||
# The USE_SSE flag now means "attempt to compile with widely-available
|
||||
# Intel architecture extensions utilized by specific optimizations in the
|
||||
# source code." It's a qualifier on PORTABLE=1 that means "mostly portable."
|
||||
# It doesn't even really check that your current CPU is compatible.
|
||||
#
|
||||
# SSE4.2 available since nehalem, ca. 2008-2010
|
||||
# Includes POPCNT for BitsSetToOne, BitParity
|
||||
TRY_SSE42="-msse4.2"
|
||||
# PCLMUL available since westmere, ca. 2010-2011
|
||||
TRY_PCLMUL="-mpclmul"
|
||||
# AVX2 available since haswell, ca. 2013-2015
|
||||
TRY_AVX2="-mavx2"
|
||||
# BMI available since haswell, ca. 2013-2015
|
||||
# Primarily for TZCNT for CountTrailingZeroBits
|
||||
TRY_BMI="-mbmi"
|
||||
# LZCNT available since haswell, ca. 2013-2015
|
||||
# For FloorLog2
|
||||
TRY_LZCNT="-mlzcnt"
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_SSE42 -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
#include <nmmintrin.h>
|
||||
int main() {
|
||||
volatile uint32_t x = _mm_crc32_u32(0, 0);
|
||||
(void)x;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS $TRY_SSE42 -DHAVE_SSE42"
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DHAVE_SSE42"
|
||||
elif test "$USE_SSE"; then
|
||||
echo "warning: USE_SSE specified but compiler could not use SSE intrinsics, disabling" >&2
|
||||
echo "warning: USE_SSE specified but compiler could not use SSE intrinsics, disabling"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_PCLMUL -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
#include <wmmintrin.h>
|
||||
int main() {
|
||||
@ -735,101 +570,32 @@ $CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_PCLMUL -x c++ - -o test.o 2>/dev/null
|
||||
const auto b = _mm_set_epi64x(0, 0);
|
||||
const auto c = _mm_clmulepi64_si128(a, b, 0x00);
|
||||
auto d = _mm_cvtsi128_si64(c);
|
||||
(void)d;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS $TRY_PCLMUL -DHAVE_PCLMUL"
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DHAVE_PCLMUL"
|
||||
elif test "$USE_SSE"; then
|
||||
echo "warning: USE_SSE specified but compiler could not use PCLMUL intrinsics, disabling" >&2
|
||||
echo "warning: USE_SSE specified but compiler could not use PCLMUL intrinsics, disabling"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_AVX2 -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
#include <immintrin.h>
|
||||
# iOS doesn't support thread-local storage, but this check would erroneously
|
||||
# succeed because the cross-compiler flags are added by the Makefile, not this
|
||||
# script.
|
||||
if [ "$PLATFORM" != IOS ]; then
|
||||
$CXX $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
|
||||
#if defined(_MSC_VER) && !defined(__thread)
|
||||
#define __thread __declspec(thread)
|
||||
#endif
|
||||
int main() {
|
||||
const auto a = _mm256_setr_epi32(0, 1, 2, 3, 4, 7, 6, 5);
|
||||
const auto b = _mm256_permutevar8x32_epi32(a, a);
|
||||
(void)b;
|
||||
static __thread int tls;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS $TRY_AVX2 -DHAVE_AVX2"
|
||||
elif test "$USE_SSE"; then
|
||||
echo "warning: USE_SSE specified but compiler could not use AVX2 intrinsics, disabling" >&2
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_BMI -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
#include <immintrin.h>
|
||||
int main(int argc, char *argv[]) {
|
||||
(void)argv;
|
||||
return (int)_tzcnt_u64((uint64_t)argc);
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS $TRY_BMI -DHAVE_BMI"
|
||||
elif test "$USE_SSE"; then
|
||||
echo "warning: USE_SSE specified but compiler could not use BMI intrinsics, disabling" >&2
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS $TRY_LZCNT -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
#include <immintrin.h>
|
||||
int main(int argc, char *argv[]) {
|
||||
(void)argv;
|
||||
return (int)_lzcnt_u64((uint64_t)argc);
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS $TRY_LZCNT -DHAVE_LZCNT"
|
||||
elif test "$USE_SSE"; then
|
||||
echo "warning: USE_SSE specified but compiler could not use LZCNT intrinsics, disabling" >&2
|
||||
fi
|
||||
|
||||
$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <cstdint>
|
||||
int main() {
|
||||
uint64_t a = 0xffffFFFFffffFFFF;
|
||||
__uint128_t b = __uint128_t(a) * a;
|
||||
a = static_cast<uint64_t>(b >> 64);
|
||||
(void)a;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DHAVE_UINT128_EXTENSION"
|
||||
fi
|
||||
|
||||
# thread_local is part of C++11 and later (TODO: clean up this define)
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SUPPORT_THREAD_LOCAL"
|
||||
|
||||
if [ "$FBCODE_BUILD" != "true" -a "$PLATFORM" = OS_LINUX ]; then
|
||||
$CXX $COMMON_FLAGS $PLATFORM_SHARED_CFLAGS -x c++ -c - -o test_dl.o 2>/dev/null <<EOF
|
||||
void dummy_func() {}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
$CXX $COMMON_FLAGS $PLATFORM_SHARED_LDFLAGS test_dl.o -o test.o 2>/dev/null
|
||||
if [ "$?" = 0 ]; then
|
||||
EXEC_LDFLAGS+="-ldl"
|
||||
rm -f test_dl.o
|
||||
fi
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SUPPORT_THREAD_LOCAL"
|
||||
fi
|
||||
fi
|
||||
|
||||
# check for F_FULLFSYNC
|
||||
$CXX $PLATFORM_CXXFALGS -x c++ - -o test.o 2>/dev/null <<EOF
|
||||
#include <fcntl.h>
|
||||
int main() {
|
||||
fcntl(0, F_FULLFSYNC);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if [ "$?" = 0 ]; then
|
||||
COMMON_FLAGS="$COMMON_FLAGS -DHAVE_FULLFSYNC"
|
||||
fi
|
||||
|
||||
rm -f test.o test_dl.o
|
||||
|
||||
PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
|
||||
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
|
||||
|
||||
@ -841,16 +607,10 @@ ROCKSDB_PATCH=`build_tools/version.sh patch`
|
||||
|
||||
echo "CC=$CC" >> "$OUTPUT"
|
||||
echo "CXX=$CXX" >> "$OUTPUT"
|
||||
echo "AR=$AR" >> "$OUTPUT"
|
||||
echo "PLATFORM=$PLATFORM" >> "$OUTPUT"
|
||||
echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> "$OUTPUT"
|
||||
echo "PLATFORM_CMAKE_FLAGS=$PLATFORM_CMAKE_FLAGS" >> "$OUTPUT"
|
||||
echo "JAVA_LDFLAGS=$JAVA_LDFLAGS" >> "$OUTPUT"
|
||||
echo "JAVA_STATIC_LDFLAGS=$JAVA_STATIC_LDFLAGS" >> "$OUTPUT"
|
||||
echo "JAVA_STATIC_DEPS_CCFLAGS=$JAVA_STATIC_DEPS_CCFLAGS" >> "$OUTPUT"
|
||||
echo "JAVA_STATIC_DEPS_CXXFLAGS=$JAVA_STATIC_DEPS_CXXFLAGS" >> "$OUTPUT"
|
||||
echo "JAVA_STATIC_DEPS_LDFLAGS=$JAVA_STATIC_DEPS_LDFLAGS" >> "$OUTPUT"
|
||||
echo "JAVAC_ARGS=$JAVAC_ARGS" >> "$OUTPUT"
|
||||
echo "VALGRIND_VER=$VALGRIND_VER" >> "$OUTPUT"
|
||||
echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> "$OUTPUT"
|
||||
echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> "$OUTPUT"
|
||||
@ -880,9 +640,3 @@ if test -n "$WITH_JEMALLOC_FLAG"; then
|
||||
echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT"
|
||||
fi
|
||||
echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT"
|
||||
if test -n "$USE_FOLLY"; then
|
||||
echo "USE_FOLLY=$USE_FOLLY" >> "$OUTPUT"
|
||||
fi
|
||||
if test -n "$PPC_LIBC_IS_GNU"; then
|
||||
echo "PPC_LIBC_IS_GNU=$PPC_LIBC_IS_GNU" >> "$OUTPUT"
|
||||
fi
|
||||
|
@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Check for some simple mistakes that should prevent commit or push
|
||||
|
||||
BAD=""
|
||||
|
||||
git grep 'namespace rocksdb' -- '*.[ch]*'
|
||||
if [ "$?" != "1" ]; then
|
||||
echo "^^^^^ Do not hardcode namespace rocksdb. Use ROCKSDB_NAMESPACE"
|
||||
BAD=1
|
||||
fi
|
||||
|
||||
git grep -i 'nocommit' -- ':!build_tools/check-sources.sh'
|
||||
if [ "$?" != "1" ]; then
|
||||
echo "^^^^^ Code was not intended to be committed"
|
||||
BAD=1
|
||||
fi
|
||||
|
||||
git grep '<rocksdb/' -- ':!build_tools/check-sources.sh'
|
||||
if [ "$?" != "1" ]; then
|
||||
echo '^^^^^ Use double-quotes as in #include "rocksdb/something.h"'
|
||||
BAD=1
|
||||
fi
|
||||
|
||||
git grep 'using namespace' -- ':!build_tools' ':!docs' \
|
||||
':!third-party/folly/folly/lang/Align.h' \
|
||||
':!third-party/gtest-1.8.1/fused-src/gtest/gtest.h'
|
||||
if [ "$?" != "1" ]; then
|
||||
echo '^^^^ Do not use "using namespace"'
|
||||
BAD=1
|
||||
fi
|
||||
|
||||
if [ "$BAD" ]; then
|
||||
exit 1
|
||||
fi
|
137
build_tools/cont_integration.sh
Executable file
137
build_tools/cont_integration.sh
Executable file
@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2016, Facebook. All rights reserved.
|
||||
#
|
||||
# Overall wrapper script for RocksDB continuous builds. The implementation is a
|
||||
# trivial pulling scheme. We loop infinitely, check if any new changes have been
|
||||
# committed, if yes then trigger a Sandcastle run, and finally go to sleep again
|
||||
# for a certain interval.
|
||||
#
|
||||
|
||||
SRC_GIT_REPO=/data/git/rocksdb-public
|
||||
error=0
|
||||
|
||||
function log {
|
||||
DATE=`date +%Y-%m-%d:%H:%M:%S`
|
||||
# shellcheck disable=SC2068
|
||||
echo $DATE $@
|
||||
}
|
||||
|
||||
function log_err {
|
||||
# shellcheck disable=SC2145
|
||||
log "ERROR: $@ Error code: $error."
|
||||
}
|
||||
|
||||
function update_repo_status {
|
||||
# Update the parent first.
|
||||
pushd $SRC_GIT_REPO
|
||||
|
||||
# This is a fatal error. Something in the environment isn't right and we will
|
||||
# terminate the execution.
|
||||
error=$?
|
||||
if [ ! $error -eq 0 ]; then
|
||||
log_err "Where is $SRC_GIT_REPO?"
|
||||
exit $error
|
||||
fi
|
||||
|
||||
HTTPS_PROXY=fwdproxy:8080 git fetch -f
|
||||
|
||||
error=$?
|
||||
if [ ! $error -eq 0 ]; then
|
||||
log_err "git fetch -f failed."
|
||||
popd
|
||||
return $error
|
||||
fi
|
||||
|
||||
git update-ref refs/heads/master refs/remotes/origin/master
|
||||
|
||||
error=$?
|
||||
if [ ! $error -eq 0 ]; then
|
||||
log_err "git update-ref failed."
|
||||
popd
|
||||
return $error
|
||||
fi
|
||||
|
||||
popd
|
||||
|
||||
# We're back in an instance-specific directory. Get the latest changes.
|
||||
git pull --rebase
|
||||
|
||||
error=$?
|
||||
if [ ! $error -eq 0 ]; then
|
||||
log_err "git pull --rebase failed."
|
||||
return $error
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Execution starts here.
|
||||
#
|
||||
|
||||
# Path to the determinator from the root of the RocksDB repo.
|
||||
CONTRUN_DETERMINATOR=./build_tools/RocksDBCommonHelper.php
|
||||
|
||||
# Value of the previous commit.
|
||||
PREV_COMMIT=
|
||||
|
||||
log "Starting to monitor for new RocksDB changes ..."
|
||||
log "Running under `pwd` as `whoami`."
|
||||
|
||||
# Paranoia. Make sure that we're using the right branch.
|
||||
git checkout master
|
||||
|
||||
error=$?
|
||||
if [ ! $error -eq 0 ]; then
|
||||
log_err "This is not good. Can't checkout master. Bye-bye!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We'll run forever and let the execution environment terminate us if we'll
|
||||
# exceed whatever timeout is set for the job.
|
||||
while true;
|
||||
do
|
||||
# Get the latest changes committed.
|
||||
update_repo_status
|
||||
|
||||
error=$?
|
||||
if [ $error -eq 0 ]; then
|
||||
LAST_COMMIT=`git log -1 | head -1 | grep commit | awk '{ print $2; }'`
|
||||
|
||||
log "Last commit is '$LAST_COMMIT', previous commit is '$PREV_COMMIT'."
|
||||
|
||||
if [ "$PREV_COMMIT" == "$LAST_COMMIT" ]; then
|
||||
log "There were no changes since the last time I checked. Going to sleep."
|
||||
else
|
||||
if [ ! -z "$LAST_COMMIT" ]; then
|
||||
log "New code has been committed or previous commit not known. " \
|
||||
"Will trigger the tests."
|
||||
|
||||
PREV_COMMIT=$LAST_COMMIT
|
||||
log "Updated previous commit to '$PREV_COMMIT'."
|
||||
|
||||
#
|
||||
# This is where we'll trigger the Sandcastle run. The values for
|
||||
# HTTPS_APP_VALUE and HTTPS_APP_VALUE will be set in the container we're
|
||||
# running in.
|
||||
#
|
||||
POST_RECEIVE_HOOK=1 php $CONTRUN_DETERMINATOR
|
||||
|
||||
error=$?
|
||||
if [ $error -eq 0 ]; then
|
||||
log "Sandcastle run successfully triggered."
|
||||
else
|
||||
log_err "Failed to trigger Sandcastle run."
|
||||
fi
|
||||
else
|
||||
log_err "Previous commit not updated. Don't know what the last one is."
|
||||
fi
|
||||
fi
|
||||
else
|
||||
log_err "Getting latest changes failed. Will skip running tests for now."
|
||||
fi
|
||||
|
||||
# Always sleep, even if errors happens while trying to determine the latest
|
||||
# commit. This will prevent us terminating in case of transient errors.
|
||||
log "Will go to sleep for 5 minutes."
|
||||
sleep 5m
|
||||
done
|
19
build_tools/dependencies.sh
Normal file
19
build_tools/dependencies.sh
Normal file
@ -0,0 +1,19 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
GCC_BASE=/mnt/gvfs/third-party2/gcc/112ec378fec7002ad3e09afde022e656049f7191/5.x/centos7-native/c447969
|
||||
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/04999bdb3ce81a11073535dcb00b5e13dc1cbaf5/stable/centos7-native/c9f9104
|
||||
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/92b0c8e5c8eecc71eb042594ce1ab3413799b385/5.x/gcc-5-glibc-2.23/339d858
|
||||
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/3d8698d5973ba94f41620a80a67e4457fdf01e90/2.23/gcc-5-glibc-2.23/ca1d1c0
|
||||
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/7f9bdaada18f59bc27ec2b0871eb8a6144343aef/1.1.3/gcc-5-glibc-2.23/9bc6787
|
||||
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/22c2d65676fb7c23cfa797c4f6937f38b026f3cf/1.2.8/gcc-5-glibc-2.23/9bc6787
|
||||
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/dc49a21c5fceec6456a7a28a94dcd16690af1337/1.0.6/gcc-5-glibc-2.23/9bc6787
|
||||
LZ4_BASE=/mnt/gvfs/third-party2/lz4/907b498203d297947f3bb70b9466f47e100f1873/r131/gcc-5-glibc-2.23/9bc6787
|
||||
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/af6628a46758f1a15484a1760cd7294164bc5ba1/1.3.5/gcc-5-glibc-2.23/03859b5
|
||||
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/0b9929d2588991c65a57168bf88aff2db87c5d48/2.2.0/gcc-5-glibc-2.23/9bc6787
|
||||
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/b1a0e56c1e3e6929813a4331ade3a58ff083afbb/master/gcc-5-glibc-2.23/aa64d6b
|
||||
NUMA_BASE=/mnt/gvfs/third-party2/numa/9cbf2460284c669ed19c3ccb200a71f7dd7e53c7/2.0.11/gcc-5-glibc-2.23/9bc6787
|
||||
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/bf3d7497fe4e6d007354f0adffa16ce3003f8338/1.3/gcc-5-glibc-2.23/b443de1
|
||||
TBB_BASE=/mnt/gvfs/third-party2/tbb/ff4e0b093534704d8abab678a4fd7f5ea7b094c7/2018_U5/gcc-5-glibc-2.23/9bc6787
|
||||
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/b5c4a61a5c483ba24722005ae07895971a2ac707/4.0.9-36_fbk5_2933_gd092e3f/gcc-5-glibc-2.23/da39a3e
|
||||
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/55031de95a2b46c82948743419a603b3d6aefe28/2.29.1/centos7-native/da39a3e
|
||||
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/f3f697a28122e6bcd513273dd9c1ff23852fc59f/3.13.0/gcc-5-glibc-2.23/9bc6787
|
||||
LUA_BASE=/mnt/gvfs/third-party2/lua/f0cd714433206d5139df61659eb7b28b1dea6683/5.2.3/gcc-5-glibc-2.23/65372bd
|
20
build_tools/dependencies_4.8.1.sh
Normal file
20
build_tools/dependencies_4.8.1.sh
Normal file
@ -0,0 +1,20 @@
|
||||
# shellcheck disable=SC2148
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
GCC_BASE=/mnt/gvfs/third-party2/gcc/cf7d14c625ce30bae1a4661c2319c5a283e4dd22/4.8.1/centos6-native/cc6c9dc
|
||||
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/8598c375b0e94e1448182eb3df034704144a838d/stable/centos6-native/3f16ddd
|
||||
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/d6e0a7da6faba45f5e5b1638f9edd7afc2f34e7d/4.8.1/gcc-4.8.1-glibc-2.17/8aac7fc
|
||||
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/d282e6e8f3d20f4e40a516834847bdc038e07973/2.17/gcc-4.8.1-glibc-2.17/99df8fc
|
||||
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/8c38a4c1e52b4c2cc8a9cdc31b9c947ed7dbfcb4/1.1.3/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/0882df3713c7a84f15abe368dc004581f20b39d7/1.2.8/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/740325875f6729f42d28deaa2147b0854f3a347e/1.0.6/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
LZ4_BASE=/mnt/gvfs/third-party2/lz4/0e790b441e2d9acd68d51e1d2e028f88c6a79ddf/r131/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/9455f75ff7f4831dc9fda02a6a0f8c68922fad8f/1.0.0/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/f001a51b2854957676d07306ef3abf67186b5c8b/2.1.1/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/fc8a13ca1fffa4d0765c716c5a0b49f0c107518f/master/gcc-4.8.1-glibc-2.17/8d31e51
|
||||
NUMA_BASE=/mnt/gvfs/third-party2/numa/17c514c4d102a25ca15f4558be564eeed76f4b6a/2.0.8/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/ad576de2a1ea560c4d3434304f0fc4e079bede42/trunk/gcc-4.8.1-glibc-2.17/675d945
|
||||
TBB_BASE=/mnt/gvfs/third-party2/tbb/9d9a554877d0c5bef330fe818ab7178806dd316a/4.0_update2/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/7c111ff27e0c466235163f00f280a9d617c3d2ec/4.0.9-36_fbk5_2933_gd092e3f/gcc-4.8.1-glibc-2.17/da39a3e
|
||||
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/b7fd454c4b10c6a81015d4524ed06cdeab558490/2.26/centos6-native/da39a3e
|
||||
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/d7f4d4d86674a57668e3a96f76f0e17dd0eb8765/3.8.1/gcc-4.8.1-glibc-2.17/c3f970a
|
||||
LUA_BASE=/mnt/gvfs/third-party2/lua/61e4abf5813bbc39bc4f548757ccfcadde175a48/5.2.3/centos6-native/730f94e
|
19
build_tools/dependencies_platform007.sh
Normal file
19
build_tools/dependencies_platform007.sh
Normal file
@ -0,0 +1,19 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
GCC_BASE=/mnt/gvfs/third-party2/gcc/6e8e715624fd15256a7970073387793dfcf79b46/7.x/centos7-native/b2ef2b6
|
||||
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/ef37e1faa1c29782abfac1ae65a291b9b7966f6d/stable/centos7-native/c9f9104
|
||||
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/c67031f0f739ac61575a061518d6ef5038f99f90/7.x/platform007/5620abc
|
||||
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/60d6f124a78798b73944f5ba87c2306ae3460153/2.26/platform007/f259413
|
||||
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/7f9bdaada18f59bc27ec2b0871eb8a6144343aef/1.1.3/platform007/ca4da3d
|
||||
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/22c2d65676fb7c23cfa797c4f6937f38b026f3cf/1.2.8/platform007/ca4da3d
|
||||
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/dc49a21c5fceec6456a7a28a94dcd16690af1337/1.0.6/platform007/ca4da3d
|
||||
LZ4_BASE=/mnt/gvfs/third-party2/lz4/907b498203d297947f3bb70b9466f47e100f1873/r131/platform007/ca4da3d
|
||||
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/3ee276cbacfad3074e3f07bf826ac47f06970f4e/1.3.5/platform007/15a3614
|
||||
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/0b9929d2588991c65a57168bf88aff2db87c5d48/2.2.0/platform007/ca4da3d
|
||||
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/9c910d36d6235cc40e8ff559358f1833452300ca/master/platform007/5b0f53e
|
||||
NUMA_BASE=/mnt/gvfs/third-party2/numa/9cbf2460284c669ed19c3ccb200a71f7dd7e53c7/2.0.11/platform007/ca4da3d
|
||||
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/bf3d7497fe4e6d007354f0adffa16ce3003f8338/1.3/platform007/6f3e0a9
|
||||
TBB_BASE=/mnt/gvfs/third-party2/tbb/ff4e0b093534704d8abab678a4fd7f5ea7b094c7/2018_U5/platform007/ca4da3d
|
||||
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/b5c4a61a5c483ba24722005ae07895971a2ac707/fb/platform007/da39a3e
|
||||
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/92ff90349e2f43ea0a8246d8b1cf17b6869013e3/2.29.1/centos7-native/da39a3e
|
||||
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/f3f697a28122e6bcd513273dd9c1ff23852fc59f/3.13.0/platform007/ca4da3d
|
||||
LUA_BASE=/mnt/gvfs/third-party2/lua/f0cd714433206d5139df61659eb7b28b1dea6683/5.3.4/platform007/5007832
|
@ -1,22 +0,0 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
GCC_BASE=/mnt/gvfs/third-party2/gcc/1795efe5f06778c15a92c8f9a2aba5dc496d9d4d/9.x/centos7-native/3bed279
|
||||
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/7318eaac22659b6ff2fe43918e4b69fd0772a8a7/9.0.0/platform009/651ee30
|
||||
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/4959b39cfbe5965a37c861c4c327fa7c5c759b87/9.x/platform009/9202ce7
|
||||
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/45ce3375cdc77ecb2520bbf8f0ecddd3f98efd7a/2.30/platform009/f259413
|
||||
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/be4de3205e029101b18aa8103daa696c2bef3b19/1.1.3/platform009/7f3b187
|
||||
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/3c160ac5c67e257501e24c6c1d00ad5e01d73db6/1.2.8/platform009/7f3b187
|
||||
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/73a237ac5bc0a5f5d67b39b8d253cfebaab88684/1.0.6/platform009/7f3b187
|
||||
LZ4_BASE=/mnt/gvfs/third-party2/lz4/ec6573523b0ce55ef6373a4801189027cf07bb2c/1.9.1/platform009/7f3b187
|
||||
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/64c58a207d2495e83abc57a500a956df09b79a7c/1.4.x/platform009/ba86d1f
|
||||
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/824d0a8a5abb5b121afd1b35fc3896407ea50092/2.2.0/platform009/7f3b187
|
||||
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/d9aef9feb850b168a68736420f217b01cce11a89/master/platform009/c305944
|
||||
NUMA_BASE=/mnt/gvfs/third-party2/numa/0af65f71e23a67bf65dc91b11f95caa39325c432/2.0.11/platform009/7f3b187
|
||||
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/02486dac347645d31dce116f44e1de3177315be2/1.4/platform009/5191652
|
||||
TBB_BASE=/mnt/gvfs/third-party2/tbb/2e0ec671e550bfca347300bf3f789d9c0fff24ad/2018_U5/platform009/7f3b187
|
||||
LIBURING_BASE=/mnt/gvfs/third-party2/liburing/70dbd9cfee63a25611417d09433a86d7711b3990/20200729/platform009/7f3b187
|
||||
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/32b8a2407b634df3f8f948ba373fc4acc6a18296/fb/platform009/da39a3e
|
||||
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/08634589372fa5f237bfd374e8c644a8364e78c1/2.32/platform009/ba86d1f/
|
||||
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/6ae525939ad02e5e676855082fbbc7828dbafeac/3.15.0/platform009/7f3b187
|
||||
LUA_BASE=/mnt/gvfs/third-party2/lua/162efd9561a3d21f6869f4814011e9cf1b3ff4dc/5.3.4/platform009/a6271c4
|
||||
BENCHMARK_BASE=/mnt/gvfs/third-party2/benchmark/30bf49ad6414325e17f3425b0edcb64239427ae3/1.6.1/platform009/7f3b187
|
||||
BOOST_BASE=/mnt/gvfs/third-party2/boost/201b7d74941e54b436dfa364a063aa6d2cd7de4c/1.69.0/platform009/8a7ffdf
|
@ -1,22 +0,0 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
# The file is generated using update_dependencies.sh.
|
||||
GCC_BASE=/mnt/gvfs/third-party2/gcc/e40bde78650fa91b8405a857e3f10bf336633fb0/11.x/centos7-native/886b5eb
|
||||
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/2043340983c032915adbb6f78903dc855b65aee8/12/platform010/9520e0f
|
||||
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/c00dcc6a3e4125c7e8b248e9a79c14b78ac9e0ca/11.x/platform010/5684a5a
|
||||
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/0b9c8e4b060eda62f3bc1c6127bbe1256697569b/2.34/platform010/f259413
|
||||
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/bc9647f7912b131315827d65cb6189c21f381d05/1.1.3/platform010/76ebdda
|
||||
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/a6f5f3f1d063d2d00cd02fc12f0f05fc3ab3a994/1.2.11/platform010/76ebdda
|
||||
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/09703139cfc376bd8a82642385a0e97726b28287/1.0.6/platform010/76ebdda
|
||||
LZ4_BASE=/mnt/gvfs/third-party2/lz4/60220d6a5bf7722b9cc239a1368c596619b12060/1.9.1/platform010/76ebdda
|
||||
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/50eace8143eaaea9473deae1f3283e0049e05633/1.4.x/platform010/64091f4
|
||||
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/5d27e5919771603da06000a027b12f799e58a4f7/2.2.0/platform010/76ebdda
|
||||
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/b62912d333ef33f9760efa6219dbe3fe6abb3b0e/master/platform010/f57cc4a
|
||||
NUMA_BASE=/mnt/gvfs/third-party2/numa/6b412770957aa3c8a87e5e0dcd8cc2f45f393bc0/2.0.11/platform010/76ebdda
|
||||
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/52f69816e936e147664ad717eb71a1a0e9dc973a/1.4/platform010/5074a48
|
||||
TBB_BASE=/mnt/gvfs/third-party2/tbb/c9cc192099fa84c0dcd0ffeedd44a373ad6e4925/2018_U5/platform010/76ebdda
|
||||
LIBURING_BASE=/mnt/gvfs/third-party2/liburing/a98e2d137007e3ebf7f33bd6f99c2c56bdaf8488/20210212/platform010/76ebdda
|
||||
BENCHMARK_BASE=/mnt/gvfs/third-party2/benchmark/780c7a0f9cf0967961e69ad08e61cddd85d61821/trunk/platform010/76ebdda
|
||||
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/02d9f76aaaba580611cf75e741753c800c7fdc12/fb/platform010/da39a3e
|
||||
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/938dc3f064ef3a48c0446f5b11d788d50b3eb5ee/2.37/centos7-native/da39a3e
|
||||
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/429a6b3203eb415f1599bd15183659153129188e/3.15.0/platform010/76ebdda
|
||||
LUA_BASE=/mnt/gvfs/third-party2/lua/363787fa5cac2a8aa20638909210443278fa138e/5.3.4/platform010/9079c97
|
@ -133,16 +133,13 @@ _TEST_NAME_TO_PARSERS = {
|
||||
'lite_test': [CompilerErrorParser, GTestErrorParser],
|
||||
'stress_crash': [CompilerErrorParser, DbCrashErrorParser],
|
||||
'stress_crash_with_atomic_flush': [CompilerErrorParser, DbCrashErrorParser],
|
||||
'stress_crash_with_txn': [CompilerErrorParser, DbCrashErrorParser],
|
||||
'write_stress': [CompilerErrorParser, WriteStressErrorParser],
|
||||
'asan': [CompilerErrorParser, GTestErrorParser, AsanErrorParser],
|
||||
'asan_crash': [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
|
||||
'asan_crash_with_atomic_flush': [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
|
||||
'asan_crash_with_txn': [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
|
||||
'ubsan': [CompilerErrorParser, GTestErrorParser, UbsanErrorParser],
|
||||
'ubsan_crash': [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
|
||||
'ubsan_crash_with_atomic_flush': [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
|
||||
'ubsan_crash_with_txn': [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
|
||||
'valgrind': [CompilerErrorParser, GTestErrorParser, ValgrindErrorParser],
|
||||
'tsan': [CompilerErrorParser, GTestErrorParser, TsanErrorParser],
|
||||
'format_compatible': [CompilerErrorParser, CompatErrorParser],
|
||||
|
@ -21,48 +21,38 @@ LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
|
||||
GLIBC_INCLUDE="$GLIBC_BASE/include"
|
||||
GLIBC_LIBS=" -L $GLIBC_BASE/lib"
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_SNAPPY; then
|
||||
# snappy
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
|
||||
else
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DSNAPPY"
|
||||
# snappy
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
|
||||
else
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DSNAPPY"
|
||||
|
||||
if test -z $PIC_BUILD; then
|
||||
if ! test $ROCKSDB_DISABLE_ZLIB; then
|
||||
# location of zlib headers and libraries
|
||||
ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
|
||||
CFLAGS+=" -DZLIB"
|
||||
fi
|
||||
# location of zlib headers and libraries
|
||||
ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
|
||||
CFLAGS+=" -DZLIB"
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_BZIP; then
|
||||
# location of bzip headers and libraries
|
||||
BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
|
||||
BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
|
||||
CFLAGS+=" -DBZIP2"
|
||||
fi
|
||||
# location of bzip headers and libraries
|
||||
BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
|
||||
BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
|
||||
CFLAGS+=" -DBZIP2"
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_LZ4; then
|
||||
LZ4_INCLUDE=" -I $LZ4_BASE/include/"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
|
||||
CFLAGS+=" -DLZ4"
|
||||
fi
|
||||
LZ4_INCLUDE=" -I $LZ4_BASE/include/"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
|
||||
CFLAGS+=" -DLZ4"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZSTD; then
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
|
||||
else
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DZSTD -DZSTD_STATIC_LINKING_ONLY"
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
|
||||
else
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DZSTD -DZSTD_STATIC_LINKING_ONLY"
|
||||
|
||||
# location of gflags headers and libraries
|
||||
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
|
||||
@ -96,10 +86,9 @@ else
|
||||
fi
|
||||
CFLAGS+=" -DTBB"
|
||||
|
||||
test "$USE_SSE" || USE_SSE=1
|
||||
export USE_SSE
|
||||
test "$PORTABLE" || PORTABLE=1
|
||||
export PORTABLE
|
||||
# use Intel SSE support for checksum calculations
|
||||
export USE_SSE=1
|
||||
export PORTABLE=1
|
||||
|
||||
BINUTILS="$BINUTILS_BASE/bin"
|
||||
AR="$BINUTILS/ar"
|
||||
@ -119,7 +108,6 @@ if [ -z "$USE_CLANG" ]; then
|
||||
# gcc
|
||||
CC="$GCC_BASE/bin/gcc"
|
||||
CXX="$GCC_BASE/bin/g++"
|
||||
AR="$GCC_BASE/bin/gcc-ar"
|
||||
|
||||
CFLAGS+=" -B$BINUTILS/gold"
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
@ -130,7 +118,6 @@ else
|
||||
CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
|
||||
CC="$CLANG_BIN/clang"
|
||||
CXX="$CLANG_BIN/clang++"
|
||||
AR="$CLANG_BIN/llvm-ar"
|
||||
|
||||
KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
|
||||
|
||||
|
117
build_tools/fbcode_config4.8.1.sh
Normal file
117
build_tools/fbcode_config4.8.1.sh
Normal file
@ -0,0 +1,117 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Set environment variables so that we can compile rocksdb using
|
||||
# fbcode settings. It uses the latest g++ compiler and also
|
||||
# uses jemalloc
|
||||
|
||||
BASEDIR=`dirname $BASH_SOURCE`
|
||||
source "$BASEDIR/dependencies_4.8.1.sh"
|
||||
|
||||
# location of libgcc
|
||||
LIBGCC_INCLUDE="$LIBGCC_BASE/include"
|
||||
LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
|
||||
|
||||
# location of glibc
|
||||
GLIBC_INCLUDE="$GLIBC_BASE/include"
|
||||
GLIBC_LIBS=" -L $GLIBC_BASE/lib"
|
||||
|
||||
# location of snappy headers and libraries
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include"
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
|
||||
|
||||
# location of zlib headers and libraries
|
||||
ZLIB_INCLUDE=" -I $ZLIB_BASE/include"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
|
||||
|
||||
# location of bzip headers and libraries
|
||||
BZIP2_INCLUDE=" -I $BZIP2_BASE/include/"
|
||||
BZIP2_LIBS=" $BZIP2_BASE/lib/libbz2.a"
|
||||
|
||||
LZ4_INCLUDE=" -I $LZ4_BASE/include"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
|
||||
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include"
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
|
||||
|
||||
# location of gflags headers and libraries
|
||||
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
|
||||
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
|
||||
|
||||
# location of jemalloc
|
||||
JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include"
|
||||
JEMALLOC_LIB="$JEMALLOC_BASE/lib/libjemalloc.a"
|
||||
|
||||
# location of numa
|
||||
NUMA_INCLUDE=" -I $NUMA_BASE/include/"
|
||||
NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
|
||||
|
||||
# location of libunwind
|
||||
LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
|
||||
|
||||
# location of tbb
|
||||
TBB_INCLUDE=" -isystem $TBB_BASE/include/"
|
||||
TBB_LIBS="$TBB_BASE/lib/libtbb.a"
|
||||
|
||||
# use Intel SSE support for checksum calculations
|
||||
export USE_SSE=1
|
||||
export PORTABLE=1
|
||||
|
||||
BINUTILS="$BINUTILS_BASE/bin"
|
||||
AR="$BINUTILS/ar"
|
||||
|
||||
DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP2_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
|
||||
|
||||
STDLIBS="-L $GCC_BASE/lib64"
|
||||
|
||||
if [ -z "$USE_CLANG" ]; then
|
||||
# gcc
|
||||
CC="$GCC_BASE/bin/gcc"
|
||||
CXX="$GCC_BASE/bin/g++"
|
||||
|
||||
CFLAGS="-B$BINUTILS/gold -m64 -mtune=generic"
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
JEMALLOC=1
|
||||
else
|
||||
# clang
|
||||
CLANG_BIN="$CLANG_BASE/bin"
|
||||
CLANG_LIB="$CLANG_BASE/lib"
|
||||
CLANG_INCLUDE="$CLANG_LIB/clang/*/include"
|
||||
CC="$CLANG_BIN/clang"
|
||||
CXX="$CLANG_BIN/clang++"
|
||||
|
||||
KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include/"
|
||||
|
||||
CFLAGS="-B$BINUTILS/gold -nostdinc -nostdlib"
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1 "
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1/x86_64-facebook-linux "
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
CFLAGS+=" -isystem $CLANG_INCLUDE"
|
||||
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
|
||||
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
|
||||
CXXFLAGS="-nostdinc++"
|
||||
fi
|
||||
|
||||
CFLAGS+=" $DEPS_INCLUDE"
|
||||
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
|
||||
CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DZSTD -DNUMA -DTBB"
|
||||
CXXFLAGS+=" $CFLAGS"
|
||||
|
||||
EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
|
||||
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib/ld.so"
|
||||
EXEC_LDFLAGS+=" $LIBUNWIND"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib"
|
||||
# required by libtbb
|
||||
EXEC_LDFLAGS+=" -ldl"
|
||||
|
||||
PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
|
||||
|
||||
EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS"
|
||||
|
||||
VALGRIND_VER="$VALGRIND_BASE/bin/"
|
||||
|
||||
LUA_PATH="$LUA_BASE"
|
||||
|
||||
export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE LUA_PATH
|
@ -9,99 +9,91 @@
|
||||
|
||||
|
||||
BASEDIR=`dirname $BASH_SOURCE`
|
||||
source "$BASEDIR/dependencies_platform009.sh"
|
||||
source "$BASEDIR/dependencies_platform007.sh"
|
||||
|
||||
CFLAGS=""
|
||||
|
||||
# libgcc
|
||||
LIBGCC_INCLUDE="$LIBGCC_BASE/include/c++/9.3.0"
|
||||
LIBGCC_INCLUDE="$LIBGCC_BASE/include/c++/7.3.0"
|
||||
LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
|
||||
|
||||
# glibc
|
||||
GLIBC_INCLUDE="$GLIBC_BASE/include"
|
||||
GLIBC_LIBS=" -L $GLIBC_BASE/lib"
|
||||
|
||||
# snappy
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
MAYBE_PIC=
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
|
||||
else
|
||||
MAYBE_PIC=_pic
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DSNAPPY"
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_SNAPPY; then
|
||||
# snappy
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DSNAPPY"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZLIB; then
|
||||
if test -z $PIC_BUILD; then
|
||||
# location of zlib headers and libraries
|
||||
ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
|
||||
CFLAGS+=" -DZLIB"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_BZIP; then
|
||||
# location of bzip headers and libraries
|
||||
BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
|
||||
BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
|
||||
BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
|
||||
CFLAGS+=" -DBZIP2"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_LZ4; then
|
||||
LZ4_INCLUDE=" -I $LZ4_BASE/include/"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
|
||||
CFLAGS+=" -DLZ4"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZSTD; then
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DZSTD"
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
|
||||
if test -z $PIC_BUILD; then
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
|
||||
else
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DZSTD"
|
||||
|
||||
# location of gflags headers and libraries
|
||||
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
|
||||
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags${MAYBE_PIC}.a"
|
||||
if test -z $PIC_BUILD; then
|
||||
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
|
||||
else
|
||||
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DGFLAGS=gflags"
|
||||
|
||||
BENCHMARK_INCLUDE=" -I $BENCHMARK_BASE/include/"
|
||||
BENCHMARK_LIBS=" $BENCHMARK_BASE/lib/libbenchmark${MAYBE_PIC}.a"
|
||||
|
||||
BOOST_INCLUDE=" -I $BOOST_BASE/include/"
|
||||
|
||||
# location of jemalloc
|
||||
JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
|
||||
JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc${MAYBE_PIC}.a"
|
||||
JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc.a"
|
||||
|
||||
# location of numa
|
||||
NUMA_INCLUDE=" -I $NUMA_BASE/include/"
|
||||
NUMA_LIB=" $NUMA_BASE/lib/libnuma${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DNUMA"
|
||||
if test -z $PIC_BUILD; then
|
||||
# location of numa
|
||||
NUMA_INCLUDE=" -I $NUMA_BASE/include/"
|
||||
NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
|
||||
CFLAGS+=" -DNUMA"
|
||||
|
||||
# location of libunwind
|
||||
LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind${MAYBE_PIC}.a"
|
||||
# location of libunwind
|
||||
LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
|
||||
fi
|
||||
|
||||
# location of TBB
|
||||
TBB_INCLUDE=" -isystem $TBB_BASE/include/"
|
||||
TBB_LIBS="$TBB_BASE/lib/libtbb${MAYBE_PIC}.a"
|
||||
if test -z $PIC_BUILD; then
|
||||
TBB_LIBS="$TBB_BASE/lib/libtbb.a"
|
||||
else
|
||||
TBB_LIBS="$TBB_BASE/lib/libtbb_pic.a"
|
||||
fi
|
||||
CFLAGS+=" -DTBB"
|
||||
|
||||
# location of LIBURING
|
||||
LIBURING_INCLUDE=" -isystem $LIBURING_BASE/include/"
|
||||
LIBURING_LIBS="$LIBURING_BASE/lib/liburing${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DLIBURING"
|
||||
|
||||
test "$USE_SSE" || USE_SSE=1
|
||||
export USE_SSE
|
||||
test "$PORTABLE" || PORTABLE=1
|
||||
export PORTABLE
|
||||
# use Intel SSE support for checksum calculations
|
||||
export USE_SSE=1
|
||||
export PORTABLE=1
|
||||
|
||||
BINUTILS="$BINUTILS_BASE/bin"
|
||||
AR="$BINUTILS/ar"
|
||||
AS="$BINUTILS/as"
|
||||
|
||||
DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE $BOOST_INCLUDE"
|
||||
DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
|
||||
|
||||
STDLIBS="-L $GCC_BASE/lib64"
|
||||
|
||||
@ -110,15 +102,14 @@ CLANG_LIB="$CLANG_BASE/lib"
|
||||
CLANG_SRC="$CLANG_BASE/../../src"
|
||||
|
||||
CLANG_ANALYZER="$CLANG_BIN/clang++"
|
||||
CLANG_SCAN_BUILD="$CLANG_SRC/llvm/clang/tools/scan-build/bin/scan-build"
|
||||
CLANG_SCAN_BUILD="$CLANG_SRC/llvm/tools/clang/tools/scan-build/bin/scan-build"
|
||||
|
||||
if [ -z "$USE_CLANG" ]; then
|
||||
# gcc
|
||||
CC="$GCC_BASE/bin/gcc"
|
||||
CXX="$GCC_BASE/bin/g++"
|
||||
AR="$GCC_BASE/bin/gcc-ar"
|
||||
|
||||
CFLAGS+=" -B$BINUTILS"
|
||||
CFLAGS+=" -B$BINUTILS/gold"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
JEMALLOC=1
|
||||
@ -127,13 +118,12 @@ else
|
||||
CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
|
||||
CC="$CLANG_BIN/clang"
|
||||
CXX="$CLANG_BIN/clang++"
|
||||
AR="$CLANG_BIN/llvm-ar"
|
||||
|
||||
KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
|
||||
|
||||
CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/9.x "
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/9.x/x86_64-facebook-linux "
|
||||
CFLAGS+=" -B$BINUTILS/gold -nostdinc -nostdlib"
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/7.x "
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/7.x/x86_64-facebook-linux "
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
CFLAGS+=" -isystem $CLANG_INCLUDE"
|
||||
@ -144,21 +134,20 @@ else
|
||||
fi
|
||||
|
||||
CFLAGS+=" $DEPS_INCLUDE"
|
||||
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42 -DROCKSDB_IOURING_PRESENT"
|
||||
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
|
||||
CXXFLAGS+=" $CFLAGS"
|
||||
|
||||
EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
|
||||
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/platform009/lib/ld.so"
|
||||
EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
|
||||
EXEC_LDFLAGS+=" -B$BINUTILS/gold"
|
||||
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/platform007/lib/ld.so"
|
||||
EXEC_LDFLAGS+=" $LIBUNWIND"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/platform009/lib"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=$GCC_BASE/lib64"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/platform007/lib"
|
||||
# required by libtbb
|
||||
EXEC_LDFLAGS+=" -ldl"
|
||||
|
||||
PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
|
||||
PLATFORM_LDFLAGS+=" -B$BINUTILS"
|
||||
|
||||
EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
|
||||
EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS"
|
||||
|
||||
VALGRIND_VER="$VALGRIND_BASE/bin/"
|
||||
|
||||
@ -166,4 +155,4 @@ VALGRIND_VER="$VALGRIND_BASE/bin/"
|
||||
LUA_PATH=
|
||||
LUA_LIB=
|
||||
|
||||
export CC CXX AR AS CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
|
||||
export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
|
@ -1,175 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Set environment variables so that we can compile rocksdb using
|
||||
# fbcode settings. It uses the latest g++ and clang compilers and also
|
||||
# uses jemalloc
|
||||
# Environment variables that change the behavior of this script:
|
||||
# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included
|
||||
|
||||
|
||||
BASEDIR=`dirname $BASH_SOURCE`
|
||||
source "$BASEDIR/dependencies_platform010.sh"
|
||||
|
||||
# Disallow using libraries from default locations as they might not be compatible with platform010 libraries.
|
||||
CFLAGS=" --sysroot=/DOES/NOT/EXIST"
|
||||
|
||||
# libgcc
|
||||
LIBGCC_INCLUDE="$LIBGCC_BASE/include/c++/trunk"
|
||||
LIBGCC_LIBS=" -L $LIBGCC_BASE/lib -B$LIBGCC_BASE/lib/gcc/x86_64-facebook-linux/trunk/"
|
||||
|
||||
# glibc
|
||||
GLIBC_INCLUDE="$GLIBC_BASE/include"
|
||||
GLIBC_LIBS=" -L $GLIBC_BASE/lib"
|
||||
GLIBC_LIBS+=" -B$GLIBC_BASE/lib"
|
||||
|
||||
if test -z $PIC_BUILD; then
|
||||
MAYBE_PIC=
|
||||
else
|
||||
MAYBE_PIC=_pic
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_SNAPPY; then
|
||||
# snappy
|
||||
SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
|
||||
SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DSNAPPY"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZLIB; then
|
||||
# location of zlib headers and libraries
|
||||
ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
|
||||
ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DZLIB"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_BZIP; then
|
||||
# location of bzip headers and libraries
|
||||
BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
|
||||
BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DBZIP2"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_LZ4; then
|
||||
LZ4_INCLUDE=" -I $LZ4_BASE/include/"
|
||||
LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DLZ4"
|
||||
fi
|
||||
|
||||
if ! test $ROCKSDB_DISABLE_ZSTD; then
|
||||
ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
|
||||
ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DZSTD"
|
||||
fi
|
||||
|
||||
# location of gflags headers and libraries
|
||||
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
|
||||
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DGFLAGS=gflags"
|
||||
|
||||
BENCHMARK_INCLUDE=" -I $BENCHMARK_BASE/include/"
|
||||
BENCHMARK_LIBS=" $BENCHMARK_BASE/lib/libbenchmark${MAYBE_PIC}.a"
|
||||
|
||||
# location of jemalloc
|
||||
JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
|
||||
JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc${MAYBE_PIC}.a"
|
||||
|
||||
# location of numa
|
||||
NUMA_INCLUDE=" -I $NUMA_BASE/include/"
|
||||
NUMA_LIB=" $NUMA_BASE/lib/libnuma${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DNUMA"
|
||||
|
||||
# location of libunwind
|
||||
LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind${MAYBE_PIC}.a"
|
||||
|
||||
# location of TBB
|
||||
TBB_INCLUDE=" -isystem $TBB_BASE/include/"
|
||||
TBB_LIBS="$TBB_BASE/lib/libtbb${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DTBB"
|
||||
|
||||
# location of LIBURING
|
||||
LIBURING_INCLUDE=" -isystem $LIBURING_BASE/include/"
|
||||
LIBURING_LIBS="$LIBURING_BASE/lib/liburing${MAYBE_PIC}.a"
|
||||
CFLAGS+=" -DLIBURING"
|
||||
|
||||
test "$USE_SSE" || USE_SSE=1
|
||||
export USE_SSE
|
||||
test "$PORTABLE" || PORTABLE=1
|
||||
export PORTABLE
|
||||
|
||||
BINUTILS="$BINUTILS_BASE/bin"
|
||||
AR="$BINUTILS/ar"
|
||||
AS="$BINUTILS/as"
|
||||
|
||||
DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE"
|
||||
|
||||
STDLIBS="-L $GCC_BASE/lib64"
|
||||
|
||||
CLANG_BIN="$CLANG_BASE/bin"
|
||||
CLANG_LIB="$CLANG_BASE/lib"
|
||||
CLANG_SRC="$CLANG_BASE/../../src"
|
||||
|
||||
CLANG_ANALYZER="$CLANG_BIN/clang++"
|
||||
CLANG_SCAN_BUILD="$CLANG_SRC/llvm/clang/tools/scan-build/bin/scan-build"
|
||||
|
||||
if [ -z "$USE_CLANG" ]; then
|
||||
# gcc
|
||||
CC="$GCC_BASE/bin/gcc"
|
||||
CXX="$GCC_BASE/bin/g++"
|
||||
AR="$GCC_BASE/bin/gcc-ar"
|
||||
|
||||
CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
|
||||
CFLAGS+=" -I$GCC_BASE/include"
|
||||
CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include"
|
||||
CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/install-tools/include"
|
||||
CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include-fixed/"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
CFLAGS+=" -I$GLIBC_INCLUDE"
|
||||
CFLAGS+=" -I$LIBGCC_BASE/include"
|
||||
CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/"
|
||||
CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/x86_64-facebook-linux/"
|
||||
CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/backward"
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE -I$GLIBC_INCLUDE"
|
||||
JEMALLOC=1
|
||||
else
|
||||
# clang
|
||||
CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
|
||||
CC="$CLANG_BIN/clang"
|
||||
CXX="$CLANG_BIN/clang++"
|
||||
AR="$CLANG_BIN/llvm-ar"
|
||||
|
||||
CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk "
|
||||
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk/x86_64-facebook-linux "
|
||||
CFLAGS+=" -isystem $GLIBC_INCLUDE"
|
||||
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
|
||||
CFLAGS+=" -isystem $CLANG_INCLUDE"
|
||||
CFLAGS+=" -Wno-expansion-to-defined "
|
||||
CXXFLAGS="-nostdinc++"
|
||||
fi
|
||||
|
||||
KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
|
||||
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
|
||||
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
|
||||
|
||||
CFLAGS+=" $DEPS_INCLUDE"
|
||||
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42 -DROCKSDB_IOURING_PRESENT"
|
||||
CXXFLAGS+=" $CFLAGS"
|
||||
|
||||
EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
|
||||
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/platform010/lib/ld.so"
|
||||
EXEC_LDFLAGS+=" $LIBUNWIND"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/platform010/lib"
|
||||
EXEC_LDFLAGS+=" -Wl,-rpath=$GCC_BASE/lib64"
|
||||
# required by libtbb
|
||||
EXEC_LDFLAGS+=" -ldl"
|
||||
|
||||
PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
|
||||
PLATFORM_LDFLAGS+=" -B$BINUTILS"
|
||||
|
||||
EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
|
||||
|
||||
VALGRIND_VER="$VALGRIND_BASE/bin/"
|
||||
|
||||
export CC CXX AR AS CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
|
@ -2,101 +2,36 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
# If clang_format_diff.py command is not specfied, we assume we are able to
|
||||
# access directly without any path.
|
||||
if [ -z $CLANG_FORMAT_DIFF ]
|
||||
then
|
||||
CLANG_FORMAT_DIFF="clang-format-diff.py"
|
||||
fi
|
||||
|
||||
print_usage () {
|
||||
echo "Usage:"
|
||||
echo "format-diff.sh [OPTIONS]"
|
||||
echo "-c: check only."
|
||||
echo "-h: print this message."
|
||||
}
|
||||
# Check clang-format-diff.py
|
||||
if ! which $CLANG_FORMAT_DIFF &> /dev/null
|
||||
then
|
||||
echo "You didn't have clang-format-diff.py and/or clang-format available in your computer!"
|
||||
echo "You can download clang-format-diff.py by running: "
|
||||
echo " curl --location http://goo.gl/iUW1u2 -o ${CLANG_FORMAT_DIFF}"
|
||||
echo "You can download clang-format by running: "
|
||||
echo " brew install clang-format"
|
||||
echo "Then, move both files (i.e. ${CLANG_FORMAT_DIFF} and clang-format) to some directory within PATH=${PATH}"
|
||||
exit 128
|
||||
fi
|
||||
|
||||
while getopts ':ch' OPTION; do
|
||||
case "$OPTION" in
|
||||
c)
|
||||
CHECK_ONLY=1
|
||||
;;
|
||||
h)
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
?)
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
# Check argparse, a library that clang-format-diff.py requires.
|
||||
python 2>/dev/null << EOF
|
||||
import argparse
|
||||
EOF
|
||||
|
||||
REPO_ROOT="$(git rev-parse --show-toplevel)"
|
||||
|
||||
if [ "$CLANG_FORMAT_DIFF" ]; then
|
||||
echo "Note: CLANG_FORMAT_DIFF='$CLANG_FORMAT_DIFF'"
|
||||
# Dry run to confirm dependencies like argparse
|
||||
if $CLANG_FORMAT_DIFF --help >/dev/null < /dev/null; then
|
||||
true #Good
|
||||
else
|
||||
exit 128
|
||||
fi
|
||||
else
|
||||
# First try directly executing the possibilities
|
||||
if clang-format-diff --help &> /dev/null < /dev/null; then
|
||||
CLANG_FORMAT_DIFF=clang-format-diff
|
||||
elif clang-format-diff.py --help &> /dev/null < /dev/null; then
|
||||
CLANG_FORMAT_DIFF=clang-format-diff.py
|
||||
elif $REPO_ROOT/clang-format-diff.py --help &> /dev/null < /dev/null; then
|
||||
CLANG_FORMAT_DIFF=$REPO_ROOT/clang-format-diff.py
|
||||
else
|
||||
# This probably means we need to directly invoke the interpreter.
|
||||
# But first find clang-format-diff.py
|
||||
if [ -f "$REPO_ROOT/clang-format-diff.py" ]; then
|
||||
CFD_PATH="$REPO_ROOT/clang-format-diff.py"
|
||||
elif which clang-format-diff.py &> /dev/null; then
|
||||
CFD_PATH="$(which clang-format-diff.py)"
|
||||
else
|
||||
echo "You didn't have clang-format-diff.py and/or clang-format available in your computer!"
|
||||
echo "You can download clang-format-diff.py by running: "
|
||||
echo " curl --location https://raw.githubusercontent.com/llvm/llvm-project/main/clang/tools/clang-format/clang-format-diff.py -o ${REPO_ROOT}/clang-format-diff.py"
|
||||
echo "You should make sure the downloaded script is not compromised."
|
||||
echo "You can download clang-format by running:"
|
||||
echo " brew install clang-format"
|
||||
echo " Or"
|
||||
echo " apt install clang-format"
|
||||
echo " This might work too:"
|
||||
echo " yum install git-clang-format"
|
||||
echo "Then make sure clang-format is available and executable from \$PATH:"
|
||||
echo " clang-format --version"
|
||||
exit 128
|
||||
fi
|
||||
# Check argparse pre-req on interpreter, or it will fail
|
||||
if echo import argparse | ${PYTHON:-python3}; then
|
||||
true # Good
|
||||
else
|
||||
echo "To run clang-format-diff.py, we'll need the library "argparse" to be"
|
||||
echo "installed. You can try either of the follow ways to install it:"
|
||||
echo " 1. Manually download argparse: https://pypi.python.org/pypi/argparse"
|
||||
echo " 2. easy_install argparse (if you have easy_install)"
|
||||
echo " 3. pip install argparse (if you have pip)"
|
||||
exit 129
|
||||
fi
|
||||
# Unfortunately, some machines have a Python2 clang-format-diff.py
|
||||
# installed but only a Python3 interpreter installed. Unfortunately,
|
||||
# automatic 2to3 migration is insufficient, so suggest downloading latest.
|
||||
if grep -q "print '" "$CFD_PATH" && \
|
||||
${PYTHON:-python3} --version | grep -q 'ython 3'; then
|
||||
echo "You have clang-format-diff.py for Python 2 but are using a Python 3"
|
||||
echo "interpreter (${PYTHON:-python3})."
|
||||
echo "You can download clang-format-diff.py for Python 3 by running: "
|
||||
echo " curl --location https://raw.githubusercontent.com/llvm/llvm-project/main/clang/tools/clang-format/clang-format-diff.py -o ${REPO_ROOT}/clang-format-diff.py"
|
||||
echo "You should make sure the downloaded script is not compromised."
|
||||
exit 130
|
||||
fi
|
||||
CLANG_FORMAT_DIFF="${PYTHON:-python3} $CFD_PATH"
|
||||
# This had better work after all those checks
|
||||
if $CLANG_FORMAT_DIFF --help >/dev/null < /dev/null; then
|
||||
true #Good
|
||||
else
|
||||
exit 128
|
||||
fi
|
||||
fi
|
||||
if [ "$?" != 0 ]
|
||||
then
|
||||
echo "To run clang-format-diff.py, we'll need the library "argparse" to be"
|
||||
echo "installed. You can try either of the follow ways to install it:"
|
||||
echo " 1. Manually download argparse: https://pypi.python.org/pypi/argparse"
|
||||
echo " 2. easy_install argparse (if you have easy_install)"
|
||||
echo " 3. pip install argparse (if you have pip)"
|
||||
exit 129
|
||||
fi
|
||||
|
||||
# TODO(kailiu) following work is not complete since we still need to figure
|
||||
@ -119,54 +54,34 @@ fi
|
||||
set -e
|
||||
|
||||
uncommitted_code=`git diff HEAD`
|
||||
LAST_MASTER=`git merge-base master HEAD`
|
||||
|
||||
# If there's no uncommitted changes, we assume user are doing post-commit
|
||||
# format check, in which case we'll try to check the modified lines vs. the
|
||||
# facebook/rocksdb.git main branch. Otherwise, we'll check format of the
|
||||
# uncommitted code only.
|
||||
# format check, in which case we'll check the modified lines since last commit
|
||||
# from master. Otherwise, we'll check format of the uncommitted code only.
|
||||
if [ -z "$uncommitted_code" ]
|
||||
then
|
||||
# Attempt to get name of facebook/rocksdb.git remote.
|
||||
[ "$FORMAT_REMOTE" ] || FORMAT_REMOTE="$(LC_ALL=POSIX LANG=POSIX git remote -v | grep 'facebook/rocksdb.git' | head -n 1 | cut -f 1)"
|
||||
# Fall back on 'origin' if that fails
|
||||
[ "$FORMAT_REMOTE" ] || FORMAT_REMOTE=origin
|
||||
# Use main branch from that remote
|
||||
[ "$FORMAT_UPSTREAM" ] || FORMAT_UPSTREAM="$FORMAT_REMOTE/$(LC_ALL=POSIX LANG=POSIX git remote show $FORMAT_REMOTE | sed -n '/HEAD branch/s/.*: //p')"
|
||||
# Get the common ancestor with that remote branch. Everything after that
|
||||
# common ancestor would be considered the contents of a pull request, so
|
||||
# should be relevant for formatting fixes.
|
||||
FORMAT_UPSTREAM_MERGE_BASE="$(git merge-base "$FORMAT_UPSTREAM" HEAD)"
|
||||
# Get the differences
|
||||
diffs=$(git diff -U0 "$FORMAT_UPSTREAM_MERGE_BASE" | $CLANG_FORMAT_DIFF -p 1)
|
||||
echo "Checking format of changes not yet in $FORMAT_UPSTREAM..."
|
||||
# Check the format of last commit
|
||||
diffs=$(git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -p 1)
|
||||
else
|
||||
# Check the format of uncommitted lines,
|
||||
diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
|
||||
echo "Checking format of uncommitted changes..."
|
||||
fi
|
||||
|
||||
if [ -z "$diffs" ]
|
||||
then
|
||||
echo "Nothing needs to be reformatted!"
|
||||
exit 0
|
||||
elif [ $CHECK_ONLY ]
|
||||
then
|
||||
echo "Your change has unformatted code. Please run make format!"
|
||||
if [ $VERBOSE_CHECK ]; then
|
||||
clang-format --version
|
||||
echo "$diffs"
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Highlight the insertion/deletion from the clang-format-diff.py's output
|
||||
COLOR_END="\033[0m"
|
||||
COLOR_RED="\033[0;31m"
|
||||
COLOR_GREEN="\033[0;32m"
|
||||
COLOR_RED="\033[0;31m"
|
||||
COLOR_GREEN="\033[0;32m"
|
||||
|
||||
echo -e "Detect lines that doesn't follow the format rules:\r"
|
||||
# Add the color to the diff. lines added will be green; lines removed will be red.
|
||||
echo "$diffs" |
|
||||
echo "$diffs" |
|
||||
sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
|
||||
sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"
|
||||
|
||||
@ -189,9 +104,9 @@ fi
|
||||
# Do in-place format adjustment.
|
||||
if [ -z "$uncommitted_code" ]
|
||||
then
|
||||
git diff -U0 "$FORMAT_UPSTREAM_MERGE_BASE" | $CLANG_FORMAT_DIFF -i -p 1
|
||||
git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -i -p 1
|
||||
else
|
||||
git diff -U0 HEAD | $CLANG_FORMAT_DIFF -i -p 1
|
||||
git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -i -p 1
|
||||
fi
|
||||
echo "Files reformatted!"
|
||||
|
||||
|
@ -1561,7 +1561,6 @@ sub save_stdin_stdout_stderr {
|
||||
::die_bug("Can't dup STDERR: $!");
|
||||
open $Global::original_stdin, "<&", "STDIN" or
|
||||
::die_bug("Can't dup STDIN: $!");
|
||||
$Global::is_terminal = (-t $Global::original_stderr) && !$ENV{'CIRCLECI'} && !$ENV{'TRAVIS'};
|
||||
}
|
||||
|
||||
sub enough_file_handles {
|
||||
@ -1841,17 +1840,12 @@ sub start_another_job {
|
||||
}
|
||||
}
|
||||
|
||||
$opt::min_progress_interval = 0;
|
||||
|
||||
sub init_progress {
|
||||
# Uses:
|
||||
# $opt::bar
|
||||
# Returns:
|
||||
# list of computers for progress output
|
||||
$|=1;
|
||||
if (not $Global::is_terminal) {
|
||||
$opt::min_progress_interval = 30;
|
||||
}
|
||||
if($opt::bar) {
|
||||
return("","");
|
||||
}
|
||||
@ -1876,9 +1870,6 @@ sub drain_job_queue {
|
||||
}
|
||||
my $last_header="";
|
||||
my $sleep = 0.2;
|
||||
my $last_left = 1000000000;
|
||||
my $last_progress_time = 0;
|
||||
my $ps_reported = 0;
|
||||
do {
|
||||
while($Global::total_running > 0) {
|
||||
debug($Global::total_running, "==", scalar
|
||||
@ -1889,38 +1880,14 @@ sub drain_job_queue {
|
||||
close $job->fh(0,"w");
|
||||
}
|
||||
}
|
||||
# When not connected to terminal, assume CI (e.g. CircleCI). In
|
||||
# that case we want occasional progress output to prevent abort
|
||||
# due to timeout with no output, but we also need to stop sending
|
||||
# progress output if there has been no actual progress, so that
|
||||
# the job can time out appropriately (CirecleCI: 10m) in case of
|
||||
# a hung test. But without special output, it is extremely
|
||||
# annoying to diagnose which test is hung, so we add that using
|
||||
# `ps` below.
|
||||
if($opt::progress and
|
||||
($Global::is_terminal or (time() - $last_progress_time) >= 30)) {
|
||||
if($opt::progress) {
|
||||
my %progress = progress();
|
||||
if($last_header ne $progress{'header'}) {
|
||||
print $Global::original_stderr "\n", $progress{'header'}, "\n";
|
||||
$last_header = $progress{'header'};
|
||||
}
|
||||
if ($Global::is_terminal) {
|
||||
print $Global::original_stderr "\r",$progress{'status'};
|
||||
}
|
||||
if ($last_left > $Global::left) {
|
||||
if (not $Global::is_terminal) {
|
||||
print $Global::original_stderr $progress{'status'},"\n";
|
||||
}
|
||||
$last_progress_time = time();
|
||||
$ps_reported = 0;
|
||||
} elsif (not $ps_reported and (time() - $last_progress_time) >= 60) {
|
||||
# No progress in at least 60 seconds: run ps
|
||||
print $Global::original_stderr "\n";
|
||||
system("ps", "-wf");
|
||||
$ps_reported = 1;
|
||||
}
|
||||
$last_left = $Global::left;
|
||||
flush $Global::original_stderr;
|
||||
print $Global::original_stderr "\r",$progress{'status'};
|
||||
flush $Global::original_stderr;
|
||||
}
|
||||
if($Global::total_running < $Global::max_jobs_running
|
||||
and not $Global::JobQueue->empty()) {
|
||||
@ -1954,7 +1921,7 @@ sub drain_job_queue {
|
||||
not $Global::start_no_new_jobs and not $Global::JobQueue->empty());
|
||||
if($opt::progress) {
|
||||
my %progress = progress();
|
||||
print $Global::original_stderr $opt::progress_sep, $progress{'status'}, "\n";
|
||||
print $Global::original_stderr "\r", $progress{'status'}, "\n";
|
||||
flush $Global::original_stderr;
|
||||
}
|
||||
}
|
||||
@ -1987,11 +1954,10 @@ sub progress {
|
||||
my $eta = "";
|
||||
my ($status,$header)=("","");
|
||||
if($opt::eta) {
|
||||
my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
|
||||
compute_eta();
|
||||
$eta = sprintf("ETA: %ds Left: %d AVG: %.2fs ",
|
||||
$this_eta, $left, $avgtime);
|
||||
$Global::left = $left;
|
||||
my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
|
||||
compute_eta();
|
||||
$eta = sprintf("ETA: %ds Left: %d AVG: %.2fs ",
|
||||
$this_eta, $left, $avgtime);
|
||||
}
|
||||
my $termcols = terminal_columns();
|
||||
my @workers = sort keys %Global::host;
|
||||
@ -5835,7 +5801,7 @@ sub workdir {
|
||||
. "-" . $self->seq();
|
||||
} else {
|
||||
$workdir = $opt::workdir;
|
||||
# Rsync treats /./ special. We don't want that
|
||||
# Rsync treats /./ special. We dont want that
|
||||
$workdir =~ s:/\./:/:g; # Remove /./
|
||||
$workdir =~ s:/+$::; # Remove ending / if any
|
||||
$workdir =~ s:^\./::g; # Remove starting ./ if any
|
||||
|
@ -103,26 +103,31 @@ function main() {
|
||||
gem_install fpm
|
||||
|
||||
make static_lib
|
||||
LIBDIR=/usr/lib
|
||||
if [[ $FPM_OUTPUT = "rpm" ]]; then
|
||||
LIBDIR=$(rpm --eval '%_libdir')
|
||||
fi
|
||||
make install INSTALL_PATH=package
|
||||
|
||||
rm -rf package
|
||||
make install DESTDIR=package PREFIX=/usr LIBDIR=$LIBDIR
|
||||
cd package
|
||||
|
||||
LIB_DIR=lib
|
||||
if [[ -z "$ARCH" ]]; then
|
||||
ARCH=$(getconf LONG_BIT)
|
||||
fi
|
||||
if [[ ("$FPM_OUTPUT" = "rpm") && ($ARCH -eq 64) ]]; then
|
||||
mv lib lib64
|
||||
LIB_DIR=lib64
|
||||
fi
|
||||
|
||||
fpm \
|
||||
-s dir \
|
||||
-t $FPM_OUTPUT \
|
||||
-C package \
|
||||
-n rocksdb \
|
||||
-v $1 \
|
||||
--prefix /usr \
|
||||
--url http://rocksdb.org/ \
|
||||
-m rocksdb@fb.com \
|
||||
--license BSD \
|
||||
--vendor Facebook \
|
||||
--description "RocksDB is an embeddable persistent key-value store for fast storage." \
|
||||
usr
|
||||
include $LIB_DIR
|
||||
}
|
||||
|
||||
# shellcheck disable=SC2068
|
||||
|
209
build_tools/precommit_checker.py
Executable file
209
build_tools/precommit_checker.py
Executable file
@ -0,0 +1,209 @@
|
||||
#!/usr/local/fbcode/gcc-4.9-glibc-2.20-fb/bin/python2.7
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
import argparse
|
||||
import commands
|
||||
import subprocess
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
#
|
||||
# Simple logger
|
||||
#
|
||||
|
||||
class Log:
|
||||
|
||||
def __init__(self, filename):
|
||||
self.filename = filename
|
||||
self.f = open(self.filename, 'w+', 0)
|
||||
|
||||
def caption(self, str):
|
||||
line = "\n##### %s #####\n" % str
|
||||
if self.f:
|
||||
self.f.write("%s \n" % line)
|
||||
else:
|
||||
print(line)
|
||||
|
||||
def error(self, str):
|
||||
data = "\n\n##### ERROR ##### %s" % str
|
||||
if self.f:
|
||||
self.f.write("%s \n" % data)
|
||||
else:
|
||||
print(data)
|
||||
|
||||
def log(self, str):
|
||||
if self.f:
|
||||
self.f.write("%s \n" % str)
|
||||
else:
|
||||
print(str)
|
||||
|
||||
#
|
||||
# Shell Environment
|
||||
#
|
||||
|
||||
|
||||
class Env(object):
|
||||
|
||||
def __init__(self, logfile, tests):
|
||||
self.tests = tests
|
||||
self.log = Log(logfile)
|
||||
|
||||
def shell(self, cmd, path=os.getcwd()):
|
||||
if path:
|
||||
os.chdir(path)
|
||||
|
||||
self.log.log("==== shell session ===========================")
|
||||
self.log.log("%s> %s" % (path, cmd))
|
||||
status = subprocess.call("cd %s; %s" % (path, cmd), shell=True,
|
||||
stdout=self.log.f, stderr=self.log.f)
|
||||
self.log.log("status = %s" % status)
|
||||
self.log.log("============================================== \n\n")
|
||||
return status
|
||||
|
||||
def GetOutput(self, cmd, path=os.getcwd()):
|
||||
if path:
|
||||
os.chdir(path)
|
||||
|
||||
self.log.log("==== shell session ===========================")
|
||||
self.log.log("%s> %s" % (path, cmd))
|
||||
status, out = commands.getstatusoutput(cmd)
|
||||
self.log.log("status = %s" % status)
|
||||
self.log.log("out = %s" % out)
|
||||
self.log.log("============================================== \n\n")
|
||||
return status, out
|
||||
|
||||
#
|
||||
# Pre-commit checker
|
||||
#
|
||||
|
||||
|
||||
class PreCommitChecker(Env):
|
||||
|
||||
def __init__(self, args):
|
||||
Env.__init__(self, args.logfile, args.tests)
|
||||
self.ignore_failure = args.ignore_failure
|
||||
|
||||
#
|
||||
# Get commands for a given job from the determinator file
|
||||
#
|
||||
def get_commands(self, test):
|
||||
status, out = self.GetOutput(
|
||||
"RATIO=1 build_tools/rocksdb-lego-determinator %s" % test, ".")
|
||||
return status, out
|
||||
|
||||
#
|
||||
# Run a specific CI job
|
||||
#
|
||||
def run_test(self, test):
|
||||
self.log.caption("Running test %s locally" % test)
|
||||
|
||||
# get commands for the CI job determinator
|
||||
status, cmds = self.get_commands(test)
|
||||
if status != 0:
|
||||
self.log.error("Error getting commands for test %s" % test)
|
||||
return False
|
||||
|
||||
# Parse the JSON to extract the commands to run
|
||||
cmds = re.findall("'shell':'([^\']*)'", cmds)
|
||||
|
||||
if len(cmds) == 0:
|
||||
self.log.log("No commands found")
|
||||
return False
|
||||
|
||||
# Run commands
|
||||
for cmd in cmds:
|
||||
# Replace J=<..> with the local environment variable
|
||||
if "J" in os.environ:
|
||||
cmd = cmd.replace("J=1", "J=%s" % os.environ["J"])
|
||||
cmd = cmd.replace("make ", "make -j%s " % os.environ["J"])
|
||||
# Run the command
|
||||
status = self.shell(cmd, ".")
|
||||
if status != 0:
|
||||
self.log.error("Error running command %s for test %s"
|
||||
% (cmd, test))
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
#
|
||||
# Run specified CI jobs
|
||||
#
|
||||
def run_tests(self):
|
||||
if not self.tests:
|
||||
self.log.error("Invalid args. Please provide tests")
|
||||
return False
|
||||
|
||||
self.print_separator()
|
||||
self.print_row("TEST", "RESULT")
|
||||
self.print_separator()
|
||||
|
||||
result = True
|
||||
for test in self.tests:
|
||||
start_time = time.time()
|
||||
self.print_test(test)
|
||||
result = self.run_test(test)
|
||||
elapsed_min = (time.time() - start_time) / 60
|
||||
if not result:
|
||||
self.log.error("Error running test %s" % test)
|
||||
self.print_result("FAIL (%dm)" % elapsed_min)
|
||||
if not self.ignore_failure:
|
||||
return False
|
||||
result = False
|
||||
else:
|
||||
self.print_result("PASS (%dm)" % elapsed_min)
|
||||
|
||||
self.print_separator()
|
||||
return result
|
||||
|
||||
#
|
||||
# Print a line
|
||||
#
|
||||
def print_separator(self):
|
||||
print("".ljust(60, "-"))
|
||||
|
||||
#
|
||||
# Print two colums
|
||||
#
|
||||
def print_row(self, c0, c1):
|
||||
print("%s%s" % (c0.ljust(40), c1.ljust(20)))
|
||||
|
||||
def print_test(self, test):
|
||||
print(test.ljust(40), end="")
|
||||
sys.stdout.flush()
|
||||
|
||||
def print_result(self, result):
|
||||
print(result.ljust(20))
|
||||
|
||||
#
|
||||
# Main
|
||||
#
|
||||
parser = argparse.ArgumentParser(description='RocksDB pre-commit checker.')
|
||||
|
||||
# --log <logfile>
|
||||
parser.add_argument('--logfile', default='/tmp/precommit-check.log',
|
||||
help='Log file. Default is /tmp/precommit-check.log')
|
||||
# --ignore_failure
|
||||
parser.add_argument('--ignore_failure', action='store_true', default=False,
|
||||
help='Stop when an error occurs')
|
||||
# <test ....>
|
||||
parser.add_argument('tests', nargs='+',
|
||||
help='CI test(s) to run. e.g: unit punit asan tsan ubsan')
|
||||
|
||||
args = parser.parse_args()
|
||||
checker = PreCommitChecker(args)
|
||||
|
||||
print("Please follow log %s" % checker.log.filename)
|
||||
|
||||
if not checker.run_tests():
|
||||
print("Error running tests. Please check log file %s"
|
||||
% checker.log.filename)
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(0)
|
@ -20,11 +20,26 @@ STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
|
||||
|
||||
function cleanup {
|
||||
rm -rf $DATA_DIR
|
||||
rm -f $STAT_FILE.*
|
||||
rm -f $STAT_FILE.fillseq
|
||||
rm -f $STAT_FILE.readrandom
|
||||
rm -f $STAT_FILE.overwrite
|
||||
rm -f $STAT_FILE.memtablefillreadrandom
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
if [ -z $GIT_BRANCH ]; then
|
||||
git_br=`git rev-parse --abbrev-ref HEAD`
|
||||
else
|
||||
git_br=$(basename $GIT_BRANCH)
|
||||
fi
|
||||
|
||||
if [ $git_br == "master" ]; then
|
||||
git_br=""
|
||||
else
|
||||
git_br="."$git_br
|
||||
fi
|
||||
|
||||
make release
|
||||
|
||||
# measure fillseq + fill up the DB for overwrite benchmark
|
||||
@ -258,6 +273,7 @@ common_in_mem_args="--db=/dev/shm/rocksdb \
|
||||
--value_size=100 \
|
||||
--compression_type=none \
|
||||
--compression_ratio=1 \
|
||||
--hard_rate_limit=2 \
|
||||
--write_buffer_size=134217728 \
|
||||
--max_write_buffer_number=4 \
|
||||
--level0_file_num_compaction_trigger=8 \
|
||||
@ -270,10 +286,12 @@ common_in_mem_args="--db=/dev/shm/rocksdb \
|
||||
--sync=0 \
|
||||
--verify_checksum=1 \
|
||||
--delete_obsolete_files_period_micros=314572800 \
|
||||
--max_grandparent_overlap_factor=10 \
|
||||
--use_plain_table=1 \
|
||||
--open_files=-1 \
|
||||
--mmap_read=1 \
|
||||
--mmap_write=0 \
|
||||
--memtablerep=prefix_hash \
|
||||
--bloom_bits=10 \
|
||||
--bloom_locality=1 \
|
||||
--perf_level=0"
|
||||
@ -360,7 +378,7 @@ function send_to_ods {
|
||||
echo >&2 "ERROR: Key $key doesn't have a value."
|
||||
return
|
||||
fi
|
||||
curl --silent "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=$key&value=$value" \
|
||||
curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
|
||||
--connect-timeout 60
|
||||
}
|
||||
|
||||
|
911
build_tools/rocksdb-lego-determinator
Executable file
911
build_tools/rocksdb-lego-determinator
Executable file
@ -0,0 +1,911 @@
|
||||
#!/usr/bin/env bash
|
||||
# This script is executed by Sandcastle
|
||||
# to determine next steps to run
|
||||
|
||||
# Usage:
|
||||
# EMAIL=<email> ONCALL=<email> TRIGGER=<trigger> SUBSCRIBER=<email> rocks_ci.py <test-name>
|
||||
#
|
||||
# Input Value
|
||||
# -------------------------------------------------------------------------
|
||||
# EMAIL Email address to report on trigger conditions
|
||||
# ONCALL Email address to raise a task on failure
|
||||
# TRIGGER Trigger conditions for email. Valid values are fail, warn, all
|
||||
# SUBSCRIBER Email addresss to add as subscriber for task
|
||||
#
|
||||
|
||||
#
|
||||
# Report configuration
|
||||
#
|
||||
REPORT_EMAIL=
|
||||
if [ ! -z $EMAIL ]; then
|
||||
if [ -z $TRIGGER ]; then
|
||||
TRIGGER="fail"
|
||||
fi
|
||||
|
||||
REPORT_EMAIL="
|
||||
{
|
||||
'type':'email',
|
||||
'triggers': [ '$TRIGGER' ],
|
||||
'emails':['$EMAIL']
|
||||
},"
|
||||
fi
|
||||
|
||||
CREATE_TASK=
|
||||
if [ ! -z $ONCALL ]; then
|
||||
CREATE_TASK="
|
||||
{
|
||||
'type':'task',
|
||||
'triggers':[ 'fail' ],
|
||||
'priority':0,
|
||||
'subscribers':[ '$SUBSCRIBER' ],
|
||||
'tags':[ 'rocksdb', 'ci' ],
|
||||
},"
|
||||
fi
|
||||
|
||||
# For now, create the tasks using only the dedicated task creation tool.
|
||||
CREATE_TASK=
|
||||
|
||||
REPORT=
|
||||
if [[ ! -z $REPORT_EMAIL || ! -z $CREATE_TASK ]]; then
|
||||
REPORT="'report': [
|
||||
$REPORT_EMAIL
|
||||
$CREATE_TASK
|
||||
]"
|
||||
fi
|
||||
|
||||
#
|
||||
# Helper variables
|
||||
#
|
||||
CLEANUP_ENV="
|
||||
{
|
||||
'name':'Cleanup environment',
|
||||
'shell':'rm -rf /dev/shm/rocksdb && mkdir /dev/shm/rocksdb && (chmod +t /dev/shm || true) && make clean',
|
||||
'user':'root'
|
||||
}"
|
||||
|
||||
# We will eventually set the RATIO to 1, but we want do this
|
||||
# in steps. RATIO=$(nproc) will make it work as J=1
|
||||
if [ -z $RATIO ]; then
|
||||
RATIO=$(nproc)
|
||||
fi
|
||||
|
||||
if [ -z $PARALLEL_J ]; then
|
||||
PARALLEL_J="J=$(expr $(nproc) / ${RATIO})"
|
||||
fi
|
||||
|
||||
if [ -z $PARALLEL_j ]; then
|
||||
PARALLEL_j="-j$(expr $(nproc) / ${RATIO})"
|
||||
fi
|
||||
|
||||
PARALLELISM="$PARALLEL_J $PARALLEL_j"
|
||||
|
||||
DEBUG="OPT=-g"
|
||||
SHM="TEST_TMPDIR=/dev/shm/rocksdb"
|
||||
NON_SHM="TMPD=/tmp/rocksdb_test_tmp"
|
||||
GCC_481="ROCKSDB_FBCODE_BUILD_WITH_481=1"
|
||||
ASAN="COMPILE_WITH_ASAN=1"
|
||||
CLANG="USE_CLANG=1"
|
||||
# in gcc-5 there are known problems with TSAN like https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71090.
|
||||
# using platform007 gives us gcc-8 or higher which has that bug fixed.
|
||||
TSAN="ROCKSDB_FBCODE_BUILD_WITH_PLATFORM007=1 COMPILE_WITH_TSAN=1"
|
||||
UBSAN="COMPILE_WITH_UBSAN=1"
|
||||
TSAN_CRASH='CRASH_TEST_EXT_ARGS="--compression_type=zstd --log2_keys_per_lock=22"'
|
||||
NON_TSAN_CRASH="CRASH_TEST_EXT_ARGS=--compression_type=zstd"
|
||||
DISABLE_JEMALLOC="DISABLE_JEMALLOC=1"
|
||||
HTTP_PROXY="https_proxy=http://fwdproxy.29.prn1:8080 http_proxy=http://fwdproxy.29.prn1:8080 ftp_proxy=http://fwdproxy.29.prn1:8080"
|
||||
SETUP_JAVA_ENV="export $HTTP_PROXY; export JAVA_HOME=/usr/local/jdk-8u60-64/; export PATH=\$JAVA_HOME/bin:\$PATH"
|
||||
PARSER="'parser':'python build_tools/error_filter.py $1'"
|
||||
|
||||
CONTRUN_NAME="ROCKSDB_CONTRUN_NAME"
|
||||
|
||||
# This code is getting called under various scenarios. What we care about is to
|
||||
# understand when it's called from nightly contruns because in that case we'll
|
||||
# create tasks for any failures. To follow the existing pattern, we'll check
|
||||
# the value of $ONCALL. If it's a diff then just call `false` to make sure
|
||||
# that errors will be properly propagated to the caller.
|
||||
if [ ! -z $ONCALL ]; then
|
||||
TASK_CREATION_TOOL="/usr/local/bin/mysql_mtr_filter --rocksdb --oncall $ONCALL"
|
||||
else
|
||||
TASK_CREATION_TOOL="false"
|
||||
fi
|
||||
|
||||
ARTIFACTS=" 'artifacts': [
|
||||
{
|
||||
'name':'database',
|
||||
'paths':[ '/dev/shm/rocksdb' ],
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# A mechanism to disable tests temporarily
|
||||
#
|
||||
DISABLE_COMMANDS="[
|
||||
{
|
||||
'name':'Disable test',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
{
|
||||
'name':'Job disabled. Please contact test owner',
|
||||
'shell':'exit 1',
|
||||
'user':'root'
|
||||
},
|
||||
],
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unit test
|
||||
#
|
||||
UNIT_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and test RocksDB debug version',
|
||||
'shell':'$SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unit test not under /dev/shm
|
||||
#
|
||||
UNIT_TEST_NON_SHM_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and test RocksDB debug version',
|
||||
'timeout': 86400,
|
||||
'shell':'$NON_SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=non_shm_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB release build and unit tests
|
||||
#
|
||||
RELEASE_BUILD_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Release Build',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build RocksDB release',
|
||||
'shell':'make $PARALLEL_j release || $CONTRUN_NAME=release $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unit test on gcc-4.8.1
|
||||
#
|
||||
UNIT_TEST_COMMANDS_481="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test on GCC 4.8.1',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and test RocksDB debug version',
|
||||
'shell':'$SHM $GCC_481 $DEBUG make $PARALLELISM check || $CONTRUN_NAME=unit_gcc_481_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB release build and unit tests
|
||||
#
|
||||
RELEASE_BUILD_COMMANDS_481="[
|
||||
{
|
||||
'name':'Rocksdb Release on GCC 4.8.1',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build RocksDB release on GCC 4.8.1',
|
||||
'shell':'$GCC_481 make $PARALLEL_j release || $CONTRUN_NAME=release_gcc481 $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unit test with CLANG
|
||||
#
|
||||
CLANG_UNIT_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and test RocksDB debug',
|
||||
'shell':'$CLANG $SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=clang_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB release build with CLANG
|
||||
#
|
||||
CLANG_RELEASE_BUILD_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb CLANG Release Build',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build RocksDB release',
|
||||
'shell':'$CLANG make $PARALLEL_j release|| $CONTRUN_NAME=clang_release $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB analyze
|
||||
#
|
||||
CLANG_ANALYZE_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb analyze',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'RocksDB build and analyze',
|
||||
'shell':'$CLANG $SHM $DEBUG make $PARALLEL_j analyze || $CONTRUN_NAME=clang_analyze $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB code coverage
|
||||
#
|
||||
CODE_COV_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test Code Coverage',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build, test and collect code coverage info',
|
||||
'shell':'$SHM $DEBUG make $PARALLELISM coverage || $CONTRUN_NAME=coverage $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unity
|
||||
#
|
||||
UNITY_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unity',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build, test unity test',
|
||||
'shell':'$SHM $DEBUG V=1 make J=1 unity_test || $CONTRUN_NAME=unity_test $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# Build RocksDB lite
|
||||
#
|
||||
LITE_BUILD_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Lite build',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build RocksDB debug version',
|
||||
'shell':'make J=1 LITE=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# Report RocksDB lite binary size to scuba
|
||||
REPORT_LITE_BINARY_SIZE_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Lite Binary Size',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Report RocksDB Lite binary size to scuba',
|
||||
'shell':'tools/report_lite_binary_size.sh',
|
||||
'user':'root',
|
||||
},
|
||||
],
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB stress/crash test
|
||||
#
|
||||
STRESS_CRASH_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Stress/Crash Test',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug stress tests',
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 db_stress || $CONTRUN_NAME=db_stress $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
{
|
||||
'name':'Build and run RocksDB debug crash tests',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 crash_test || $CONTRUN_NAME=crash_test $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
}
|
||||
],
|
||||
$ARTIFACTS,
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB stress/crash test with atomic flush
|
||||
#
|
||||
STRESS_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Stress/Crash Test (atomic flush)',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug stress tests',
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 db_stress || $CONTRUN_NAME=db_stress $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
{
|
||||
'name':'Build and run RocksDB debug crash tests with atomic flush',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 crash_test_with_atomic_flush || $CONTRUN_NAME=crash_test_with_atomic_flush $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
}
|
||||
],
|
||||
$ARTIFACTS,
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
# RocksDB write stress test.
|
||||
# We run on disk device on purpose (i.e. no $SHM)
|
||||
# because we want to add some randomness to fsync commands
|
||||
WRITE_STRESS_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Write Stress Test',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB write stress tests',
|
||||
'shell':'make write_stress && python tools/write_stress_runner.py --runtime_sec=3600 --db=/tmp/rocksdb_write_stress || $CONTRUN_NAME=write_stress $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
}
|
||||
],
|
||||
'artifacts': [{'name': 'database', 'paths': ['/tmp/rocksdb_write_stress']}],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
|
||||
#
|
||||
# RocksDB test under address sanitizer
|
||||
#
|
||||
ASAN_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test under ASAN',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Test RocksDB debug under ASAN',
|
||||
'shell':'set -o pipefail && ($SHM $ASAN $DEBUG make $PARALLELISM asan_check || $CONTRUN_NAME=asan_check $TASK_CREATION_TOOL) |& /usr/facebook/ops/scripts/asan_symbolize.py -d',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
}
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash testing under address sanitizer
|
||||
#
|
||||
ASAN_CRASH_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb crash test under ASAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug asan_crash_test',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 asan_crash_test || $CONTRUN_NAME=asan_crash_test $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash testing with atomic flush under address sanitizer
|
||||
#
|
||||
ASAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb crash test (atomic flush) under ASAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug asan_crash_test_with_atomic_flush',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 asan_crash_test_with_atomic_flush || $CONTRUN_NAME=asan_crash_test_with_atomic_flush $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB test under undefined behavior sanitizer
|
||||
#
|
||||
UBSAN_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test under UBSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Test RocksDB debug under UBSAN',
|
||||
'shell':'set -o pipefail && $SHM $UBSAN $DEBUG make $PARALLELISM ubsan_check || $CONTRUN_NAME=ubsan_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
}
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash testing under udnefined behavior sanitizer
|
||||
#
|
||||
UBSAN_CRASH_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb crash test under UBSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug ubsan_crash_test',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 ubsan_crash_test || $CONTRUN_NAME=ubsan_crash_test $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash testing with atomic flush under undefined behavior sanitizer
|
||||
#
|
||||
UBSAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb crash test (atomic flush) under UBSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build and run RocksDB debug ubsan_crash_test_with_atomic_flush',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG $NON_TSAN_CRASH make J=1 ubsan_crash_test_with_atomic_flush || $CONTRUN_NAME=ubsan_crash_test_with_atomic_flush $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB unit test under valgrind
|
||||
#
|
||||
VALGRIND_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test under valgrind',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Run RocksDB debug unit tests',
|
||||
'timeout': 86400,
|
||||
'shell':'$SHM $DEBUG make $PARALLELISM valgrind_test || $CONTRUN_NAME=valgrind_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB test under TSAN
|
||||
#
|
||||
TSAN_UNIT_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Unit Test under TSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Run RocksDB debug unit test',
|
||||
'timeout': 86400,
|
||||
'shell':'set -o pipefail && $SHM $DEBUG $TSAN make $PARALLELISM check || $CONTRUN_NAME=tsan_check $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash test under TSAN
|
||||
#
|
||||
TSAN_CRASH_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Crash Test under TSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Compile and run',
|
||||
'timeout': 86400,
|
||||
'shell':'set -o pipefail && $SHM $DEBUG $TSAN $TSAN_CRASH CRASH_TEST_KILL_ODD=1887 make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB crash test with atomic flush under TSAN
|
||||
#
|
||||
TSAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Crash Test with atomic flush under TSAN',
|
||||
'oncall':'$ONCALL',
|
||||
'timeout': 86400,
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Compile and run',
|
||||
'timeout': 86400,
|
||||
'shell':'set -o pipefail && $SHM $DEBUG $TSAN $TSAN_CRASH CRASH_TEST_KILL_ODD=1887 make J=1 crash_test_with_atomic_flush || $CONTRUN_NAME=tsan_crash_test_with_atomic_flush $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB format compatible
|
||||
#
|
||||
|
||||
run_format_compatible()
|
||||
{
|
||||
export TEST_TMPDIR=/dev/shm/rocksdb
|
||||
rm -rf /dev/shm/rocksdb
|
||||
mkdir /dev/shm/rocksdb
|
||||
|
||||
tools/check_format_compatible.sh
|
||||
}
|
||||
|
||||
FORMAT_COMPATIBLE_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Format Compatible tests',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Run RocksDB debug unit test',
|
||||
'shell':'build_tools/rocksdb-lego-determinator run_format_compatible || $CONTRUN_NAME=run_format_compatible $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB no compression
|
||||
#
|
||||
run_no_compression()
|
||||
{
|
||||
export TEST_TMPDIR=/dev/shm/rocksdb
|
||||
rm -rf /dev/shm/rocksdb
|
||||
mkdir /dev/shm/rocksdb
|
||||
make clean
|
||||
cat build_tools/fbcode_config.sh | grep -iv dzstd | grep -iv dzlib | grep -iv dlz4 | grep -iv dsnappy | grep -iv dbzip2 > .tmp.fbcode_config.sh
|
||||
mv .tmp.fbcode_config.sh build_tools/fbcode_config.sh
|
||||
cat Makefile | grep -v tools/ldb_test.py > .tmp.Makefile
|
||||
mv .tmp.Makefile Makefile
|
||||
make $DEBUG J=1 check
|
||||
}
|
||||
|
||||
NO_COMPRESSION_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb No Compression tests',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Run RocksDB debug unit test',
|
||||
'shell':'build_tools/rocksdb-lego-determinator run_no_compression || $CONTRUN_NAME=run_no_compression $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB regression
|
||||
#
|
||||
run_regression()
|
||||
{
|
||||
time -v bash -vx ./build_tools/regression_build_test.sh $(mktemp -d $WORKSPACE/leveldb.XXXX) $(mktemp leveldb_test_stats.XXXX)
|
||||
|
||||
# ======= report size to ODS ========
|
||||
|
||||
# parameters: $1 -- key, $2 -- value
|
||||
function send_size_to_ods {
|
||||
curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \
|
||||
--connect-timeout 60
|
||||
}
|
||||
|
||||
# === normal build ===
|
||||
make clean
|
||||
make -j$(nproc) static_lib
|
||||
send_size_to_ods static_lib $(stat --printf="%s" librocksdb.a)
|
||||
strip librocksdb.a
|
||||
send_size_to_ods static_lib_stripped $(stat --printf="%s" librocksdb.a)
|
||||
|
||||
make -j$(nproc) shared_lib
|
||||
send_size_to_ods shared_lib $(stat --printf="%s" `readlink -f librocksdb.so`)
|
||||
strip `readlink -f librocksdb.so`
|
||||
send_size_to_ods shared_lib_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
|
||||
|
||||
# === lite build ===
|
||||
make clean
|
||||
make LITE=1 -j$(nproc) static_lib
|
||||
send_size_to_ods static_lib_lite $(stat --printf="%s" librocksdb.a)
|
||||
strip librocksdb.a
|
||||
send_size_to_ods static_lib_lite_stripped $(stat --printf="%s" librocksdb.a)
|
||||
|
||||
make LITE=1 -j$(nproc) shared_lib
|
||||
send_size_to_ods shared_lib_lite $(stat --printf="%s" `readlink -f librocksdb.so`)
|
||||
strip `readlink -f librocksdb.so`
|
||||
send_size_to_ods shared_lib_lite_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
|
||||
}
|
||||
|
||||
REGRESSION_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb regression commands',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Make and run script',
|
||||
'shell':'build_tools/rocksdb-lego-determinator run_regression || $CONTRUN_NAME=run_regression $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
#
|
||||
# RocksDB Java build
|
||||
#
|
||||
JAVA_BUILD_TEST_COMMANDS="[
|
||||
{
|
||||
'name':'Rocksdb Java Build',
|
||||
'oncall':'$ONCALL',
|
||||
'steps': [
|
||||
$CLEANUP_ENV,
|
||||
{
|
||||
'name':'Build RocksDB for Java',
|
||||
'shell':'$SETUP_JAVA_ENV; $SHM make rocksdbjava || $CONTRUN_NAME=rocksdbjava $TASK_CREATION_TOOL',
|
||||
'user':'root',
|
||||
$PARSER
|
||||
},
|
||||
],
|
||||
$REPORT
|
||||
}
|
||||
]"
|
||||
|
||||
|
||||
case $1 in
|
||||
unit)
|
||||
echo $UNIT_TEST_COMMANDS
|
||||
;;
|
||||
unit_non_shm)
|
||||
echo $UNIT_TEST_NON_SHM_COMMANDS
|
||||
;;
|
||||
release)
|
||||
echo $RELEASE_BUILD_COMMANDS
|
||||
;;
|
||||
unit_481)
|
||||
echo $UNIT_TEST_COMMANDS_481
|
||||
;;
|
||||
release_481)
|
||||
echo $RELEASE_BUILD_COMMANDS_481
|
||||
;;
|
||||
clang_unit)
|
||||
echo $CLANG_UNIT_TEST_COMMANDS
|
||||
;;
|
||||
clang_release)
|
||||
echo $CLANG_RELEASE_BUILD_COMMANDS
|
||||
;;
|
||||
clang_analyze)
|
||||
echo $CLANG_ANALYZE_COMMANDS
|
||||
;;
|
||||
code_cov)
|
||||
echo $CODE_COV_COMMANDS
|
||||
;;
|
||||
unity)
|
||||
echo $UNITY_COMMANDS
|
||||
;;
|
||||
lite)
|
||||
echo $LITE_BUILD_COMMANDS
|
||||
;;
|
||||
report_lite_binary_size)
|
||||
echo $REPORT_LITE_BINARY_SIZE_COMMANDS
|
||||
;;
|
||||
stress_crash)
|
||||
echo $STRESS_CRASH_TEST_COMMANDS
|
||||
;;
|
||||
stress_crash_with_atomic_flush)
|
||||
echo $STRESS_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS
|
||||
;;
|
||||
write_stress)
|
||||
echo $WRITE_STRESS_COMMANDS
|
||||
;;
|
||||
asan)
|
||||
echo $ASAN_TEST_COMMANDS
|
||||
;;
|
||||
asan_crash)
|
||||
echo $ASAN_CRASH_TEST_COMMANDS
|
||||
;;
|
||||
asan_crash_with_atomic_flush)
|
||||
echo $ASAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS
|
||||
;;
|
||||
ubsan)
|
||||
echo $UBSAN_TEST_COMMANDS
|
||||
;;
|
||||
ubsan_crash)
|
||||
echo $UBSAN_CRASH_TEST_COMMANDS
|
||||
;;
|
||||
ubsan_crash_with_atomic_flush)
|
||||
echo $UBSAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS
|
||||
;;
|
||||
valgrind)
|
||||
echo $VALGRIND_TEST_COMMANDS
|
||||
;;
|
||||
tsan)
|
||||
echo $TSAN_UNIT_TEST_COMMANDS
|
||||
;;
|
||||
tsan_crash)
|
||||
echo $TSAN_CRASH_TEST_COMMANDS
|
||||
;;
|
||||
tsan_crash_with_atomic_flush)
|
||||
echo $TSAN_CRASH_TEST_WITH_ATOMIC_FLUSH_COMMANDS
|
||||
;;
|
||||
format_compatible)
|
||||
echo $FORMAT_COMPATIBLE_COMMANDS
|
||||
;;
|
||||
run_format_compatible)
|
||||
run_format_compatible
|
||||
;;
|
||||
no_compression)
|
||||
echo $NO_COMPRESSION_COMMANDS
|
||||
;;
|
||||
run_no_compression)
|
||||
run_no_compression
|
||||
;;
|
||||
regression)
|
||||
echo $REGRESSION_COMMANDS
|
||||
;;
|
||||
run_regression)
|
||||
run_regression
|
||||
;;
|
||||
java_build)
|
||||
echo $JAVA_BUILD_TEST_COMMANDS
|
||||
;;
|
||||
*)
|
||||
echo "Invalid determinator command"
|
||||
;;
|
||||
esac
|
@ -42,7 +42,7 @@ $RunOnly.Add("c_test") | Out-Null
|
||||
$RunOnly.Add("compact_on_deletion_collector_test") | Out-Null
|
||||
$RunOnly.Add("merge_test") | Out-Null
|
||||
$RunOnly.Add("stringappend_test") | Out-Null # Apparently incorrectly written
|
||||
$RunOnly.Add("backup_engine_test") | Out-Null # Disabled
|
||||
$RunOnly.Add("backupable_db_test") | Out-Null # Disabled
|
||||
$RunOnly.Add("timer_queue_test") | Out-Null # Not a gtest
|
||||
|
||||
if($RunAll -and $SuiteRun -ne "") {
|
||||
@ -68,7 +68,7 @@ $BinariesFolder = -Join($RootFolder, "\build\Debug\")
|
||||
|
||||
if($WorkFolder -eq "") {
|
||||
|
||||
# If TEST_TMPDIR is set use it
|
||||
# If TEST_TMPDIR is set use it
|
||||
[string]$var = $Env:TEST_TMPDIR
|
||||
if($var -eq "") {
|
||||
$WorkFolder = -Join($RootFolder, "\db_tests\")
|
||||
@ -93,7 +93,7 @@ $ExcludeCasesSet = New-Object System.Collections.Generic.HashSet[string]
|
||||
if($ExcludeCases -ne "") {
|
||||
Write-Host "ExcludeCases: $ExcludeCases"
|
||||
$l = $ExcludeCases -split ' '
|
||||
ForEach($t in $l) {
|
||||
ForEach($t in $l) {
|
||||
$ExcludeCasesSet.Add($t) | Out-Null
|
||||
}
|
||||
}
|
||||
@ -102,7 +102,7 @@ $ExcludeExesSet = New-Object System.Collections.Generic.HashSet[string]
|
||||
if($ExcludeExes -ne "") {
|
||||
Write-Host "ExcludeExe: $ExcludeExes"
|
||||
$l = $ExcludeExes -split ' '
|
||||
ForEach($t in $l) {
|
||||
ForEach($t in $l) {
|
||||
$ExcludeExesSet.Add($t) | Out-Null
|
||||
}
|
||||
}
|
||||
@ -118,10 +118,6 @@ if($ExcludeExes -ne "") {
|
||||
# MultiThreaded/MultiThreadedDBTest.
|
||||
# MultiThreaded/0 # GetParam() = 0
|
||||
# MultiThreaded/1 # GetParam() = 1
|
||||
# RibbonTypeParamTest/0. # TypeParam = struct DefaultTypesAndSettings
|
||||
# CompactnessAndBacktrackAndFpRate
|
||||
# Extremes
|
||||
# FindOccupancyForSuccessRate
|
||||
#
|
||||
# into this:
|
||||
#
|
||||
@ -129,9 +125,6 @@ if($ExcludeExes -ne "") {
|
||||
# DBTest.WriteEmptyBatch
|
||||
# MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
|
||||
# MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
|
||||
# RibbonTypeParamTest/0.CompactnessAndBacktrackAndFpRate
|
||||
# RibbonTypeParamTest/0.Extremes
|
||||
# RibbonTypeParamTest/0.FindOccupancyForSuccessRate
|
||||
#
|
||||
# Output into the parameter in a form TestName -> Log File Name
|
||||
function ExtractTestCases([string]$GTestExe, $HashTable) {
|
||||
@ -145,8 +138,6 @@ function ExtractTestCases([string]$GTestExe, $HashTable) {
|
||||
|
||||
ForEach( $l in $Tests) {
|
||||
|
||||
# remove trailing comment if any
|
||||
$l = $l -replace '\s+\#.*',''
|
||||
# Leading whitespace is fine
|
||||
$l = $l -replace '^\s+',''
|
||||
# Trailing dot is a test group but no whitespace
|
||||
@ -155,7 +146,8 @@ function ExtractTestCases([string]$GTestExe, $HashTable) {
|
||||
} else {
|
||||
# Otherwise it is a test name, remove leading space
|
||||
$test = $l
|
||||
# create a log name
|
||||
# remove trailing comment if any and create a log name
|
||||
$test = $test -replace '\s+\#.*',''
|
||||
$test = "$Group$test"
|
||||
|
||||
if($ExcludeCasesSet.Contains($test)) {
|
||||
@ -261,7 +253,7 @@ if($Run -ne "") {
|
||||
|
||||
$DiscoveredExe = @()
|
||||
dir -Path $search_path | ForEach-Object {
|
||||
$DiscoveredExe += ($_.Name)
|
||||
$DiscoveredExe += ($_.Name)
|
||||
}
|
||||
|
||||
# Remove exclusions
|
||||
@ -301,7 +293,7 @@ if($SuiteRun -ne "") {
|
||||
|
||||
$ListOfExe = @()
|
||||
dir -Path $search_path | ForEach-Object {
|
||||
$ListOfExe += ($_.Name)
|
||||
$ListOfExe += ($_.Name)
|
||||
}
|
||||
|
||||
# Exclude those in RunOnly from running as suites
|
||||
@ -356,7 +348,7 @@ function RunJobs($Suites, $TestCmds, [int]$ConcurrencyVal)
|
||||
|
||||
# Wait for all to finish and get the results
|
||||
while(($JobToLog.Count -gt 0) -or
|
||||
($TestCmds.Count -gt 0) -or
|
||||
($TestCmds.Count -gt 0) -or
|
||||
($Suites.Count -gt 0)) {
|
||||
|
||||
# Make sure we have maximum concurrent jobs running if anything
|
||||
@ -476,8 +468,8 @@ RunJobs -Suites $CasesToRun -TestCmds $TestExes -ConcurrencyVal $Concurrency
|
||||
|
||||
$EndDate = (Get-Date)
|
||||
|
||||
New-TimeSpan -Start $StartDate -End $EndDate |
|
||||
ForEach-Object {
|
||||
New-TimeSpan -Start $StartDate -End $EndDate |
|
||||
ForEach-Object {
|
||||
"Elapsed time: {0:g}" -f $_
|
||||
}
|
||||
|
||||
@ -491,3 +483,5 @@ if(!$script:success) {
|
||||
}
|
||||
|
||||
exit 0
|
||||
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
set -ex
|
||||
set -e
|
||||
|
||||
ROCKSDB_VERSION="6.7.3"
|
||||
ZSTD_VERSION="1.4.4"
|
||||
ROCKSDB_VERSION="5.10.3"
|
||||
ZSTD_VERSION="1.1.3"
|
||||
|
||||
echo "This script configures CentOS with everything needed to build and run RocksDB"
|
||||
|
||||
@ -40,6 +40,5 @@ cd /usr/local/rocksdb
|
||||
chown -R vagrant:vagrant /usr/local/rocksdb/
|
||||
sudo -u vagrant make static_lib
|
||||
cd examples/
|
||||
sudo -u vagrant LD_LIBRARY_PATH=/usr/local/lib/ make all
|
||||
sudo -u vagrant LD_LIBRARY_PATH=/usr/local/lib/ ./c_simple_example
|
||||
|
||||
sudo -u vagrant make all
|
||||
sudo -u vagrant ./c_simple_example
|
||||
|
@ -6,20 +6,13 @@
|
||||
BASEDIR=$(dirname $0)
|
||||
OUTPUT=""
|
||||
|
||||
function log_header()
|
||||
{
|
||||
echo "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved." >> "$OUTPUT"
|
||||
echo "# The file is generated using update_dependencies.sh." >> "$OUTPUT"
|
||||
}
|
||||
|
||||
|
||||
function log_variable()
|
||||
{
|
||||
echo "$1=${!1}" >> "$OUTPUT"
|
||||
}
|
||||
|
||||
|
||||
TP2_LATEST="/data/users/$USER/fbsource/fbcode/third-party2/"
|
||||
TP2_LATEST="/mnt/vol/engshare/fbcode/third-party2"
|
||||
## $1 => lib name
|
||||
## $2 => lib version (if not provided, will try to pick latest)
|
||||
## $3 => platform (if not provided, will try to pick latest gcc)
|
||||
@ -51,8 +44,6 @@ function get_lib_base()
|
||||
fi
|
||||
|
||||
result=`ls -1d $result/*/ | head -n1`
|
||||
|
||||
echo Finding link $result
|
||||
|
||||
# lib_name => LIB_NAME_BASE
|
||||
local __res_var=${lib_name^^}"_BASE"
|
||||
@ -64,10 +55,10 @@ function get_lib_base()
|
||||
}
|
||||
|
||||
###########################################################
|
||||
# platform010 dependencies #
|
||||
# platform007 dependencies #
|
||||
###########################################################
|
||||
|
||||
OUTPUT="$BASEDIR/dependencies_platform010.sh"
|
||||
OUTPUT="$BASEDIR/dependencies_platform007.sh"
|
||||
|
||||
rm -f "$OUTPUT"
|
||||
touch "$OUTPUT"
|
||||
@ -75,42 +66,38 @@ touch "$OUTPUT"
|
||||
echo "Writing dependencies to $OUTPUT"
|
||||
|
||||
# Compilers locations
|
||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/11.x/centos7-native/*/`
|
||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/12/platform010/*/`
|
||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/7.x/centos7-native/*/`
|
||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos7-native/*/`
|
||||
|
||||
log_header
|
||||
log_variable GCC_BASE
|
||||
log_variable CLANG_BASE
|
||||
|
||||
# Libraries locations
|
||||
get_lib_base libgcc 11.x platform010
|
||||
get_lib_base glibc 2.34 platform010
|
||||
get_lib_base snappy LATEST platform010
|
||||
get_lib_base zlib LATEST platform010
|
||||
get_lib_base bzip2 LATEST platform010
|
||||
get_lib_base lz4 LATEST platform010
|
||||
get_lib_base zstd LATEST platform010
|
||||
get_lib_base gflags LATEST platform010
|
||||
get_lib_base jemalloc LATEST platform010
|
||||
get_lib_base numa LATEST platform010
|
||||
get_lib_base libunwind LATEST platform010
|
||||
get_lib_base tbb 2018_U5 platform010
|
||||
get_lib_base liburing LATEST platform010
|
||||
get_lib_base benchmark LATEST platform010
|
||||
get_lib_base libgcc 7.x platform007
|
||||
get_lib_base glibc 2.26 platform007
|
||||
get_lib_base snappy LATEST platform007
|
||||
get_lib_base zlib LATEST platform007
|
||||
get_lib_base bzip2 LATEST platform007
|
||||
get_lib_base lz4 LATEST platform007
|
||||
get_lib_base zstd LATEST platform007
|
||||
get_lib_base gflags LATEST platform007
|
||||
get_lib_base jemalloc LATEST platform007
|
||||
get_lib_base numa LATEST platform007
|
||||
get_lib_base libunwind LATEST platform007
|
||||
get_lib_base tbb LATEST platform007
|
||||
|
||||
get_lib_base kernel-headers fb platform010
|
||||
get_lib_base kernel-headers fb platform007
|
||||
get_lib_base binutils LATEST centos7-native
|
||||
get_lib_base valgrind LATEST platform010
|
||||
get_lib_base lua 5.3.4 platform010
|
||||
get_lib_base valgrind LATEST platform007
|
||||
get_lib_base lua 5.3.4 platform007
|
||||
|
||||
git diff $OUTPUT
|
||||
|
||||
|
||||
###########################################################
|
||||
# platform009 dependencies #
|
||||
# 5.x dependencies #
|
||||
###########################################################
|
||||
|
||||
OUTPUT="$BASEDIR/dependencies_platform009.sh"
|
||||
OUTPUT="$BASEDIR/dependencies.sh"
|
||||
|
||||
rm -f "$OUTPUT"
|
||||
touch "$OUTPUT"
|
||||
@ -118,32 +105,68 @@ touch "$OUTPUT"
|
||||
echo "Writing dependencies to $OUTPUT"
|
||||
|
||||
# Compilers locations
|
||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/9.x/centos7-native/*/`
|
||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/9.0.0/platform009/*/`
|
||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/5.x/centos7-native/*/`
|
||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos7-native/*/`
|
||||
|
||||
log_header
|
||||
log_variable GCC_BASE
|
||||
log_variable CLANG_BASE
|
||||
|
||||
# Libraries locations
|
||||
get_lib_base libgcc 9.x platform009
|
||||
get_lib_base glibc 2.30 platform009
|
||||
get_lib_base snappy LATEST platform009
|
||||
get_lib_base zlib LATEST platform009
|
||||
get_lib_base bzip2 LATEST platform009
|
||||
get_lib_base lz4 LATEST platform009
|
||||
get_lib_base zstd LATEST platform009
|
||||
get_lib_base gflags LATEST platform009
|
||||
get_lib_base jemalloc LATEST platform009
|
||||
get_lib_base numa LATEST platform009
|
||||
get_lib_base libunwind LATEST platform009
|
||||
get_lib_base tbb 2018_U5 platform009
|
||||
get_lib_base liburing LATEST platform009
|
||||
get_lib_base benchmark LATEST platform009
|
||||
get_lib_base libgcc 5.x gcc-5-glibc-2.23
|
||||
get_lib_base glibc 2.23 gcc-5-glibc-2.23
|
||||
get_lib_base snappy LATEST gcc-5-glibc-2.23
|
||||
get_lib_base zlib LATEST gcc-5-glibc-2.23
|
||||
get_lib_base bzip2 LATEST gcc-5-glibc-2.23
|
||||
get_lib_base lz4 LATEST gcc-5-glibc-2.23
|
||||
get_lib_base zstd LATEST gcc-5-glibc-2.23
|
||||
get_lib_base gflags LATEST gcc-5-glibc-2.23
|
||||
get_lib_base jemalloc LATEST gcc-5-glibc-2.23
|
||||
get_lib_base numa LATEST gcc-5-glibc-2.23
|
||||
get_lib_base libunwind LATEST gcc-5-glibc-2.23
|
||||
get_lib_base tbb LATEST gcc-5-glibc-2.23
|
||||
|
||||
get_lib_base kernel-headers fb platform009
|
||||
get_lib_base kernel-headers 4.0.9-36_fbk5_2933_gd092e3f gcc-5-glibc-2.23
|
||||
get_lib_base binutils LATEST centos7-native
|
||||
get_lib_base valgrind LATEST platform009
|
||||
get_lib_base lua 5.3.4 platform009
|
||||
get_lib_base valgrind LATEST gcc-5-glibc-2.23
|
||||
get_lib_base lua 5.2.3 gcc-5-glibc-2.23
|
||||
|
||||
git diff $OUTPUT
|
||||
|
||||
###########################################################
|
||||
# 4.8.1 dependencies #
|
||||
###########################################################
|
||||
|
||||
OUTPUT="$BASEDIR/dependencies_4.8.1.sh"
|
||||
|
||||
rm -f "$OUTPUT"
|
||||
touch "$OUTPUT"
|
||||
|
||||
echo "Writing 4.8.1 dependencies to $OUTPUT"
|
||||
|
||||
# Compilers locations
|
||||
GCC_BASE=`readlink -f $TP2_LATEST/gcc/4.8.1/centos6-native/*/`
|
||||
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
|
||||
|
||||
log_variable GCC_BASE
|
||||
log_variable CLANG_BASE
|
||||
|
||||
# Libraries locations
|
||||
get_lib_base libgcc 4.8.1 gcc-4.8.1-glibc-2.17
|
||||
get_lib_base glibc 2.17 gcc-4.8.1-glibc-2.17
|
||||
get_lib_base snappy LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base zlib LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base bzip2 LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base lz4 LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base zstd LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base gflags LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base jemalloc LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base numa LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base libunwind LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base tbb 4.0_update2 gcc-4.8.1-glibc-2.17
|
||||
|
||||
get_lib_base kernel-headers LATEST gcc-4.8.1-glibc-2.17
|
||||
get_lib_base binutils LATEST centos6-native
|
||||
get_lib_base valgrind 3.8.1 gcc-4.8.1-glibc-2.17
|
||||
get_lib_base lua 5.2.3 centos6-native
|
||||
|
||||
git diff $OUTPUT
|
||||
|
72
cache/cache.cc
vendored
72
cache/cache.cc
vendored
@ -1,72 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include "rocksdb/cache.h"
|
||||
|
||||
#include "cache/lru_cache.h"
|
||||
#include "rocksdb/secondary_cache.h"
|
||||
#include "rocksdb/utilities/customizable_util.h"
|
||||
#include "rocksdb/utilities/options_type.h"
|
||||
#include "util/string_util.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
#ifndef ROCKSDB_LITE
|
||||
static std::unordered_map<std::string, OptionTypeInfo>
|
||||
lru_cache_options_type_info = {
|
||||
{"capacity",
|
||||
{offsetof(struct LRUCacheOptions, capacity), OptionType::kSizeT,
|
||||
OptionVerificationType::kNormal, OptionTypeFlags::kMutable}},
|
||||
{"num_shard_bits",
|
||||
{offsetof(struct LRUCacheOptions, num_shard_bits), OptionType::kInt,
|
||||
OptionVerificationType::kNormal, OptionTypeFlags::kMutable}},
|
||||
{"strict_capacity_limit",
|
||||
{offsetof(struct LRUCacheOptions, strict_capacity_limit),
|
||||
OptionType::kBoolean, OptionVerificationType::kNormal,
|
||||
OptionTypeFlags::kMutable}},
|
||||
{"high_pri_pool_ratio",
|
||||
{offsetof(struct LRUCacheOptions, high_pri_pool_ratio),
|
||||
OptionType::kDouble, OptionVerificationType::kNormal,
|
||||
OptionTypeFlags::kMutable}},
|
||||
};
|
||||
#endif // ROCKSDB_LITE
|
||||
|
||||
Status SecondaryCache::CreateFromString(
|
||||
const ConfigOptions& config_options, const std::string& value,
|
||||
std::shared_ptr<SecondaryCache>* result) {
|
||||
return LoadSharedObject<SecondaryCache>(config_options, value, nullptr,
|
||||
result);
|
||||
}
|
||||
|
||||
Status Cache::CreateFromString(const ConfigOptions& config_options,
|
||||
const std::string& value,
|
||||
std::shared_ptr<Cache>* result) {
|
||||
Status status;
|
||||
std::shared_ptr<Cache> cache;
|
||||
if (value.find('=') == std::string::npos) {
|
||||
cache = NewLRUCache(ParseSizeT(value));
|
||||
} else {
|
||||
#ifndef ROCKSDB_LITE
|
||||
LRUCacheOptions cache_opts;
|
||||
status = OptionTypeInfo::ParseStruct(config_options, "",
|
||||
&lru_cache_options_type_info, "",
|
||||
value, &cache_opts);
|
||||
if (status.ok()) {
|
||||
cache = NewLRUCache(cache_opts);
|
||||
}
|
||||
#else
|
||||
(void)config_options;
|
||||
status = Status::NotSupported("Cannot load cache in LITE mode ", value);
|
||||
#endif //! ROCKSDB_LITE
|
||||
}
|
||||
if (status.ok()) {
|
||||
result->swap(cache);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
280
cache/cache_bench.cc
vendored
280
cache/cache_bench.cc
vendored
@ -1,11 +1,11 @@
|
||||
// Copyright (c) 2013-present, Facebook, Inc. All rights reserved.
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#ifndef __STDC_FORMAT_MACROS
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#endif
|
||||
#ifndef GFLAGS
|
||||
#include <cstdio>
|
||||
int main() {
|
||||
@ -13,8 +13,272 @@ int main() {
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
#include "rocksdb/cache_bench_tool.h"
|
||||
int main(int argc, char** argv) {
|
||||
return ROCKSDB_NAMESPACE::cache_bench_tool(argc, argv);
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <sys/types.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "port/port.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/db.h"
|
||||
#include "rocksdb/env.h"
|
||||
#include "util/gflags_compat.h"
|
||||
#include "util/mutexlock.h"
|
||||
#include "util/random.h"
|
||||
|
||||
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
|
||||
|
||||
static const uint32_t KB = 1024;
|
||||
|
||||
DEFINE_int32(threads, 16, "Number of concurrent threads to run.");
|
||||
DEFINE_int64(cache_size, 8 * KB * KB,
|
||||
"Number of bytes to use as a cache of uncompressed data.");
|
||||
DEFINE_int32(num_shard_bits, 4, "shard_bits.");
|
||||
|
||||
DEFINE_int64(max_key, 1 * KB * KB * KB, "Max number of key to place in cache");
|
||||
DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
|
||||
|
||||
DEFINE_bool(populate_cache, false, "Populate cache before operations");
|
||||
DEFINE_int32(insert_percent, 40,
|
||||
"Ratio of insert to total workload (expressed as a percentage)");
|
||||
DEFINE_int32(lookup_percent, 50,
|
||||
"Ratio of lookup to total workload (expressed as a percentage)");
|
||||
DEFINE_int32(erase_percent, 10,
|
||||
"Ratio of erase to total workload (expressed as a percentage)");
|
||||
|
||||
DEFINE_bool(use_clock_cache, false, "");
|
||||
|
||||
namespace rocksdb {
|
||||
|
||||
class CacheBench;
|
||||
namespace {
|
||||
void deleter(const Slice& /*key*/, void* value) {
|
||||
delete reinterpret_cast<char *>(value);
|
||||
}
|
||||
|
||||
// State shared by all concurrent executions of the same benchmark.
|
||||
class SharedState {
|
||||
public:
|
||||
explicit SharedState(CacheBench* cache_bench)
|
||||
: cv_(&mu_),
|
||||
num_threads_(FLAGS_threads),
|
||||
num_initialized_(0),
|
||||
start_(false),
|
||||
num_done_(0),
|
||||
cache_bench_(cache_bench) {
|
||||
}
|
||||
|
||||
~SharedState() {}
|
||||
|
||||
port::Mutex* GetMutex() {
|
||||
return &mu_;
|
||||
}
|
||||
|
||||
port::CondVar* GetCondVar() {
|
||||
return &cv_;
|
||||
}
|
||||
|
||||
CacheBench* GetCacheBench() const {
|
||||
return cache_bench_;
|
||||
}
|
||||
|
||||
void IncInitialized() {
|
||||
num_initialized_++;
|
||||
}
|
||||
|
||||
void IncDone() {
|
||||
num_done_++;
|
||||
}
|
||||
|
||||
bool AllInitialized() const {
|
||||
return num_initialized_ >= num_threads_;
|
||||
}
|
||||
|
||||
bool AllDone() const {
|
||||
return num_done_ >= num_threads_;
|
||||
}
|
||||
|
||||
void SetStart() {
|
||||
start_ = true;
|
||||
}
|
||||
|
||||
bool Started() const {
|
||||
return start_;
|
||||
}
|
||||
|
||||
private:
|
||||
port::Mutex mu_;
|
||||
port::CondVar cv_;
|
||||
|
||||
const uint64_t num_threads_;
|
||||
uint64_t num_initialized_;
|
||||
bool start_;
|
||||
uint64_t num_done_;
|
||||
|
||||
CacheBench* cache_bench_;
|
||||
};
|
||||
|
||||
// Per-thread state for concurrent executions of the same benchmark.
|
||||
struct ThreadState {
|
||||
uint32_t tid;
|
||||
Random rnd;
|
||||
SharedState* shared;
|
||||
|
||||
ThreadState(uint32_t index, SharedState* _shared)
|
||||
: tid(index), rnd(1000 + index), shared(_shared) {}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
class CacheBench {
|
||||
public:
|
||||
CacheBench() : num_threads_(FLAGS_threads) {
|
||||
if (FLAGS_use_clock_cache) {
|
||||
cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
|
||||
if (!cache_) {
|
||||
fprintf(stderr, "Clock cache not supported.\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
cache_ = NewLRUCache(FLAGS_cache_size, FLAGS_num_shard_bits);
|
||||
}
|
||||
}
|
||||
|
||||
~CacheBench() {}
|
||||
|
||||
void PopulateCache() {
|
||||
Random rnd(1);
|
||||
for (int64_t i = 0; i < FLAGS_cache_size; i++) {
|
||||
uint64_t rand_key = rnd.Next() % FLAGS_max_key;
|
||||
// Cast uint64* to be char*, data would be copied to cache
|
||||
Slice key(reinterpret_cast<char*>(&rand_key), 8);
|
||||
// do insert
|
||||
cache_->Insert(key, new char[10], 1, &deleter);
|
||||
}
|
||||
}
|
||||
|
||||
bool Run() {
|
||||
rocksdb::Env* env = rocksdb::Env::Default();
|
||||
|
||||
PrintEnv();
|
||||
SharedState shared(this);
|
||||
std::vector<ThreadState*> threads(num_threads_);
|
||||
for (uint32_t i = 0; i < num_threads_; i++) {
|
||||
threads[i] = new ThreadState(i, &shared);
|
||||
env->StartThread(ThreadBody, threads[i]);
|
||||
}
|
||||
{
|
||||
MutexLock l(shared.GetMutex());
|
||||
while (!shared.AllInitialized()) {
|
||||
shared.GetCondVar()->Wait();
|
||||
}
|
||||
// Record start time
|
||||
uint64_t start_time = env->NowMicros();
|
||||
|
||||
// Start all threads
|
||||
shared.SetStart();
|
||||
shared.GetCondVar()->SignalAll();
|
||||
|
||||
// Wait threads to complete
|
||||
while (!shared.AllDone()) {
|
||||
shared.GetCondVar()->Wait();
|
||||
}
|
||||
|
||||
// Record end time
|
||||
uint64_t end_time = env->NowMicros();
|
||||
double elapsed = static_cast<double>(end_time - start_time) * 1e-6;
|
||||
uint32_t qps = static_cast<uint32_t>(
|
||||
static_cast<double>(FLAGS_threads * FLAGS_ops_per_thread) / elapsed);
|
||||
fprintf(stdout, "Complete in %.3f s; QPS = %u\n", elapsed, qps);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<Cache> cache_;
|
||||
uint32_t num_threads_;
|
||||
|
||||
static void ThreadBody(void* v) {
|
||||
ThreadState* thread = reinterpret_cast<ThreadState*>(v);
|
||||
SharedState* shared = thread->shared;
|
||||
|
||||
{
|
||||
MutexLock l(shared->GetMutex());
|
||||
shared->IncInitialized();
|
||||
if (shared->AllInitialized()) {
|
||||
shared->GetCondVar()->SignalAll();
|
||||
}
|
||||
while (!shared->Started()) {
|
||||
shared->GetCondVar()->Wait();
|
||||
}
|
||||
}
|
||||
thread->shared->GetCacheBench()->OperateCache(thread);
|
||||
|
||||
{
|
||||
MutexLock l(shared->GetMutex());
|
||||
shared->IncDone();
|
||||
if (shared->AllDone()) {
|
||||
shared->GetCondVar()->SignalAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void OperateCache(ThreadState* thread) {
|
||||
for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
|
||||
uint64_t rand_key = thread->rnd.Next() % FLAGS_max_key;
|
||||
// Cast uint64* to be char*, data would be copied to cache
|
||||
Slice key(reinterpret_cast<char*>(&rand_key), 8);
|
||||
int32_t prob_op = thread->rnd.Uniform(100);
|
||||
if (prob_op >= 0 && prob_op < FLAGS_insert_percent) {
|
||||
// do insert
|
||||
cache_->Insert(key, new char[10], 1, &deleter);
|
||||
} else if (prob_op -= FLAGS_insert_percent &&
|
||||
prob_op < FLAGS_lookup_percent) {
|
||||
// do lookup
|
||||
auto handle = cache_->Lookup(key);
|
||||
if (handle) {
|
||||
cache_->Release(handle);
|
||||
}
|
||||
} else if (prob_op -= FLAGS_lookup_percent &&
|
||||
prob_op < FLAGS_erase_percent) {
|
||||
// do erase
|
||||
cache_->Erase(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void PrintEnv() const {
|
||||
printf("RocksDB version : %d.%d\n", kMajorVersion, kMinorVersion);
|
||||
printf("Number of threads : %d\n", FLAGS_threads);
|
||||
printf("Ops per thread : %" PRIu64 "\n", FLAGS_ops_per_thread);
|
||||
printf("Cache size : %" PRIu64 "\n", FLAGS_cache_size);
|
||||
printf("Num shard bits : %d\n", FLAGS_num_shard_bits);
|
||||
printf("Max key : %" PRIu64 "\n", FLAGS_max_key);
|
||||
printf("Populate cache : %d\n", FLAGS_populate_cache);
|
||||
printf("Insert percentage : %d%%\n", FLAGS_insert_percent);
|
||||
printf("Lookup percentage : %d%%\n", FLAGS_lookup_percent);
|
||||
printf("Erase percentage : %d%%\n", FLAGS_erase_percent);
|
||||
printf("----------------------------\n");
|
||||
}
|
||||
};
|
||||
} // namespace rocksdb
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
ParseCommandLineFlags(&argc, &argv, true);
|
||||
|
||||
if (FLAGS_threads <= 0) {
|
||||
fprintf(stderr, "threads number <= 0\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
rocksdb::CacheBench bench;
|
||||
if (FLAGS_populate_cache) {
|
||||
bench.PopulateCache();
|
||||
}
|
||||
if (bench.Run()) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif // GFLAGS
|
||||
|
924
cache/cache_bench_tool.cc
vendored
924
cache/cache_bench_tool.cc
vendored
@ -1,924 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#ifdef GFLAGS
|
||||
#include <cinttypes>
|
||||
#include <cstddef>
|
||||
#include <cstdio>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
|
||||
#include "db/db_impl/db_impl.h"
|
||||
#include "monitoring/histogram.h"
|
||||
#include "port/port.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/convenience.h"
|
||||
#include "rocksdb/db.h"
|
||||
#include "rocksdb/env.h"
|
||||
#include "rocksdb/secondary_cache.h"
|
||||
#include "rocksdb/system_clock.h"
|
||||
#include "rocksdb/table_properties.h"
|
||||
#include "table/block_based/block_based_table_reader.h"
|
||||
#include "table/block_based/cachable_entry.h"
|
||||
#include "util/coding.h"
|
||||
#include "util/gflags_compat.h"
|
||||
#include "util/hash.h"
|
||||
#include "util/mutexlock.h"
|
||||
#include "util/random.h"
|
||||
#include "util/stop_watch.h"
|
||||
#include "util/string_util.h"
|
||||
|
||||
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
|
||||
|
||||
static constexpr uint32_t KiB = uint32_t{1} << 10;
|
||||
static constexpr uint32_t MiB = KiB << 10;
|
||||
static constexpr uint64_t GiB = MiB << 10;
|
||||
|
||||
DEFINE_uint32(threads, 16, "Number of concurrent threads to run.");
|
||||
DEFINE_uint64(cache_size, 1 * GiB,
|
||||
"Number of bytes to use as a cache of uncompressed data.");
|
||||
DEFINE_uint32(num_shard_bits, 6, "shard_bits.");
|
||||
|
||||
DEFINE_double(resident_ratio, 0.25,
|
||||
"Ratio of keys fitting in cache to keyspace.");
|
||||
DEFINE_uint64(ops_per_thread, 2000000U, "Number of operations per thread.");
|
||||
DEFINE_uint32(value_bytes, 8 * KiB, "Size of each value added.");
|
||||
|
||||
DEFINE_uint32(skew, 5, "Degree of skew in key selection");
|
||||
DEFINE_bool(populate_cache, true, "Populate cache before operations");
|
||||
|
||||
DEFINE_uint32(lookup_insert_percent, 87,
|
||||
"Ratio of lookup (+ insert on not found) to total workload "
|
||||
"(expressed as a percentage)");
|
||||
DEFINE_uint32(insert_percent, 2,
|
||||
"Ratio of insert to total workload (expressed as a percentage)");
|
||||
DEFINE_uint32(lookup_percent, 10,
|
||||
"Ratio of lookup to total workload (expressed as a percentage)");
|
||||
DEFINE_uint32(erase_percent, 1,
|
||||
"Ratio of erase to total workload (expressed as a percentage)");
|
||||
DEFINE_bool(gather_stats, false,
|
||||
"Whether to periodically simulate gathering block cache stats, "
|
||||
"using one more thread.");
|
||||
DEFINE_uint32(
|
||||
gather_stats_sleep_ms, 1000,
|
||||
"How many milliseconds to sleep between each gathering of stats.");
|
||||
|
||||
DEFINE_uint32(gather_stats_entries_per_lock, 256,
|
||||
"For Cache::ApplyToAllEntries");
|
||||
DEFINE_bool(skewed, false, "If true, skew the key access distribution");
|
||||
#ifndef ROCKSDB_LITE
|
||||
DEFINE_string(secondary_cache_uri, "",
|
||||
"Full URI for creating a custom secondary cache object");
|
||||
static class std::shared_ptr<ROCKSDB_NAMESPACE::SecondaryCache> secondary_cache;
|
||||
#endif // ROCKSDB_LITE
|
||||
|
||||
DEFINE_bool(use_clock_cache, false, "");
|
||||
|
||||
// ## BEGIN stress_cache_key sub-tool options ##
|
||||
// See class StressCacheKey below.
|
||||
DEFINE_bool(stress_cache_key, false,
|
||||
"If true, run cache key stress test instead");
|
||||
DEFINE_uint32(
|
||||
sck_files_per_day, 2500000,
|
||||
"(-stress_cache_key) Simulated files generated per simulated day");
|
||||
// NOTE: Giving each run a specified lifetime, rather than e.g. "until
|
||||
// first collision" ensures equal skew from start-up, when collisions are
|
||||
// less likely.
|
||||
DEFINE_uint32(sck_days_per_run, 90,
|
||||
"(-stress_cache_key) Number of days to simulate in each run");
|
||||
// NOTE: The number of observed collisions directly affects the relative
|
||||
// accuracy of the predicted probabilities. 15 observations should be well
|
||||
// within factor-of-2 accuracy.
|
||||
DEFINE_uint32(
|
||||
sck_min_collision, 15,
|
||||
"(-stress_cache_key) Keep running until this many collisions seen");
|
||||
// sck_file_size_mb can be thought of as average file size. The simulation is
|
||||
// not precise enough to care about the distribution of file sizes; other
|
||||
// simulations (https://github.com/pdillinger/unique_id/tree/main/monte_carlo)
|
||||
// indicate the distribution only makes a small difference (e.g. < 2x factor)
|
||||
DEFINE_uint32(
|
||||
sck_file_size_mb, 32,
|
||||
"(-stress_cache_key) Simulated file size in MiB, for accounting purposes");
|
||||
DEFINE_uint32(sck_reopen_nfiles, 100,
|
||||
"(-stress_cache_key) Simulate DB re-open average every n files");
|
||||
DEFINE_uint32(sck_restarts_per_day, 24,
|
||||
"(-stress_cache_key) Average simulated process restarts per day "
|
||||
"(across DBs)");
|
||||
DEFINE_uint32(
|
||||
sck_db_count, 100,
|
||||
"(-stress_cache_key) Parallel DBs in simulation sharing a block cache");
|
||||
DEFINE_uint32(
|
||||
sck_table_bits, 20,
|
||||
"(-stress_cache_key) Log2 number of tracked (live) files (across DBs)");
|
||||
// sck_keep_bits being well below full 128 bits amplifies the collision
|
||||
// probability so that the true probability can be estimated through observed
|
||||
// collisions. (More explanation below.)
|
||||
DEFINE_uint32(
|
||||
sck_keep_bits, 50,
|
||||
"(-stress_cache_key) Number of bits to keep from each cache key (<= 64)");
|
||||
// sck_randomize is used to validate whether cache key is performing "better
|
||||
// than random." Even with this setting, file offsets are not randomized.
|
||||
DEFINE_bool(sck_randomize, false,
|
||||
"(-stress_cache_key) Randomize (hash) cache key");
|
||||
// See https://github.com/facebook/rocksdb/pull/9058
|
||||
DEFINE_bool(sck_footer_unique_id, false,
|
||||
"(-stress_cache_key) Simulate using proposed footer unique id");
|
||||
// ## END stress_cache_key sub-tool options ##
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class CacheBench;
|
||||
namespace {
|
||||
// State shared by all concurrent executions of the same benchmark.
|
||||
class SharedState {
|
||||
public:
|
||||
explicit SharedState(CacheBench* cache_bench)
|
||||
: cv_(&mu_),
|
||||
num_initialized_(0),
|
||||
start_(false),
|
||||
num_done_(0),
|
||||
cache_bench_(cache_bench) {}
|
||||
|
||||
~SharedState() {}
|
||||
|
||||
port::Mutex* GetMutex() { return &mu_; }
|
||||
|
||||
port::CondVar* GetCondVar() { return &cv_; }
|
||||
|
||||
CacheBench* GetCacheBench() const { return cache_bench_; }
|
||||
|
||||
void IncInitialized() { num_initialized_++; }
|
||||
|
||||
void IncDone() { num_done_++; }
|
||||
|
||||
bool AllInitialized() const { return num_initialized_ >= FLAGS_threads; }
|
||||
|
||||
bool AllDone() const { return num_done_ >= FLAGS_threads; }
|
||||
|
||||
void SetStart() { start_ = true; }
|
||||
|
||||
bool Started() const { return start_; }
|
||||
|
||||
private:
|
||||
port::Mutex mu_;
|
||||
port::CondVar cv_;
|
||||
|
||||
uint64_t num_initialized_;
|
||||
bool start_;
|
||||
uint64_t num_done_;
|
||||
|
||||
CacheBench* cache_bench_;
|
||||
};
|
||||
|
||||
// Per-thread state for concurrent executions of the same benchmark.
|
||||
struct ThreadState {
|
||||
uint32_t tid;
|
||||
Random64 rnd;
|
||||
SharedState* shared;
|
||||
HistogramImpl latency_ns_hist;
|
||||
uint64_t duration_us = 0;
|
||||
|
||||
ThreadState(uint32_t index, SharedState* _shared)
|
||||
: tid(index), rnd(1000 + index), shared(_shared) {}
|
||||
};
|
||||
|
||||
struct KeyGen {
|
||||
char key_data[27];
|
||||
|
||||
Slice GetRand(Random64& rnd, uint64_t max_key, int max_log) {
|
||||
uint64_t key = 0;
|
||||
if (!FLAGS_skewed) {
|
||||
uint64_t raw = rnd.Next();
|
||||
// Skew according to setting
|
||||
for (uint32_t i = 0; i < FLAGS_skew; ++i) {
|
||||
raw = std::min(raw, rnd.Next());
|
||||
}
|
||||
key = FastRange64(raw, max_key);
|
||||
} else {
|
||||
key = rnd.Skewed(max_log);
|
||||
if (key > max_key) {
|
||||
key -= max_key;
|
||||
}
|
||||
}
|
||||
// Variable size and alignment
|
||||
size_t off = key % 8;
|
||||
key_data[0] = char{42};
|
||||
EncodeFixed64(key_data + 1, key);
|
||||
key_data[9] = char{11};
|
||||
EncodeFixed64(key_data + 10, key);
|
||||
key_data[18] = char{4};
|
||||
EncodeFixed64(key_data + 19, key);
|
||||
return Slice(&key_data[off], sizeof(key_data) - off);
|
||||
}
|
||||
};
|
||||
|
||||
char* createValue(Random64& rnd) {
|
||||
char* rv = new char[FLAGS_value_bytes];
|
||||
// Fill with some filler data, and take some CPU time
|
||||
for (uint32_t i = 0; i < FLAGS_value_bytes; i += 8) {
|
||||
EncodeFixed64(rv + i, rnd.Next());
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
// Callbacks for secondary cache
|
||||
size_t SizeFn(void* /*obj*/) { return FLAGS_value_bytes; }
|
||||
|
||||
Status SaveToFn(void* obj, size_t /*offset*/, size_t size, void* out) {
|
||||
memcpy(out, obj, size);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
// Different deleters to simulate using deleter to gather
|
||||
// stats on the code origin and kind of cache entries.
|
||||
void deleter1(const Slice& /*key*/, void* value) {
|
||||
delete[] static_cast<char*>(value);
|
||||
}
|
||||
void deleter2(const Slice& /*key*/, void* value) {
|
||||
delete[] static_cast<char*>(value);
|
||||
}
|
||||
void deleter3(const Slice& /*key*/, void* value) {
|
||||
delete[] static_cast<char*>(value);
|
||||
}
|
||||
|
||||
Cache::CacheItemHelper helper1(SizeFn, SaveToFn, deleter1);
|
||||
Cache::CacheItemHelper helper2(SizeFn, SaveToFn, deleter2);
|
||||
Cache::CacheItemHelper helper3(SizeFn, SaveToFn, deleter3);
|
||||
} // namespace
|
||||
|
||||
class CacheBench {
|
||||
static constexpr uint64_t kHundredthUint64 =
|
||||
std::numeric_limits<uint64_t>::max() / 100U;
|
||||
|
||||
public:
|
||||
CacheBench()
|
||||
: max_key_(static_cast<uint64_t>(FLAGS_cache_size / FLAGS_resident_ratio /
|
||||
FLAGS_value_bytes)),
|
||||
lookup_insert_threshold_(kHundredthUint64 *
|
||||
FLAGS_lookup_insert_percent),
|
||||
insert_threshold_(lookup_insert_threshold_ +
|
||||
kHundredthUint64 * FLAGS_insert_percent),
|
||||
lookup_threshold_(insert_threshold_ +
|
||||
kHundredthUint64 * FLAGS_lookup_percent),
|
||||
erase_threshold_(lookup_threshold_ +
|
||||
kHundredthUint64 * FLAGS_erase_percent),
|
||||
skewed_(FLAGS_skewed) {
|
||||
if (erase_threshold_ != 100U * kHundredthUint64) {
|
||||
fprintf(stderr, "Percentages must add to 100.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
max_log_ = 0;
|
||||
if (skewed_) {
|
||||
uint64_t max_key = max_key_;
|
||||
while (max_key >>= 1) max_log_++;
|
||||
if (max_key > (static_cast<uint64_t>(1) << max_log_)) max_log_++;
|
||||
}
|
||||
|
||||
if (FLAGS_use_clock_cache) {
|
||||
cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
|
||||
if (!cache_) {
|
||||
fprintf(stderr, "Clock cache not supported.\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
LRUCacheOptions opts(FLAGS_cache_size, FLAGS_num_shard_bits, false, 0.5);
|
||||
#ifndef ROCKSDB_LITE
|
||||
if (!FLAGS_secondary_cache_uri.empty()) {
|
||||
Status s = SecondaryCache::CreateFromString(
|
||||
ConfigOptions(), FLAGS_secondary_cache_uri, &secondary_cache);
|
||||
if (secondary_cache == nullptr) {
|
||||
fprintf(
|
||||
stderr,
|
||||
"No secondary cache registered matching string: %s status=%s\n",
|
||||
FLAGS_secondary_cache_uri.c_str(), s.ToString().c_str());
|
||||
exit(1);
|
||||
}
|
||||
opts.secondary_cache = secondary_cache;
|
||||
}
|
||||
#endif // ROCKSDB_LITE
|
||||
|
||||
cache_ = NewLRUCache(opts);
|
||||
}
|
||||
}
|
||||
|
||||
~CacheBench() {}
|
||||
|
||||
void PopulateCache() {
|
||||
Random64 rnd(1);
|
||||
KeyGen keygen;
|
||||
for (uint64_t i = 0; i < 2 * FLAGS_cache_size; i += FLAGS_value_bytes) {
|
||||
cache_->Insert(keygen.GetRand(rnd, max_key_, max_log_), createValue(rnd),
|
||||
&helper1, FLAGS_value_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
bool Run() {
|
||||
const auto clock = SystemClock::Default().get();
|
||||
|
||||
PrintEnv();
|
||||
SharedState shared(this);
|
||||
std::vector<std::unique_ptr<ThreadState> > threads(FLAGS_threads);
|
||||
for (uint32_t i = 0; i < FLAGS_threads; i++) {
|
||||
threads[i].reset(new ThreadState(i, &shared));
|
||||
std::thread(ThreadBody, threads[i].get()).detach();
|
||||
}
|
||||
|
||||
HistogramImpl stats_hist;
|
||||
std::string stats_report;
|
||||
std::thread stats_thread(StatsBody, &shared, &stats_hist, &stats_report);
|
||||
|
||||
uint64_t start_time;
|
||||
{
|
||||
MutexLock l(shared.GetMutex());
|
||||
while (!shared.AllInitialized()) {
|
||||
shared.GetCondVar()->Wait();
|
||||
}
|
||||
// Record start time
|
||||
start_time = clock->NowMicros();
|
||||
|
||||
// Start all threads
|
||||
shared.SetStart();
|
||||
shared.GetCondVar()->SignalAll();
|
||||
|
||||
// Wait threads to complete
|
||||
while (!shared.AllDone()) {
|
||||
shared.GetCondVar()->Wait();
|
||||
}
|
||||
}
|
||||
|
||||
// Stats gathering is considered background work. This time measurement
|
||||
// is for foreground work, and not really ideal for that. See below.
|
||||
uint64_t end_time = clock->NowMicros();
|
||||
stats_thread.join();
|
||||
|
||||
// Wall clock time - includes idle time if threads
|
||||
// finish at different times (not ideal).
|
||||
double elapsed_secs = static_cast<double>(end_time - start_time) * 1e-6;
|
||||
uint32_t ops_per_sec = static_cast<uint32_t>(
|
||||
1.0 * FLAGS_threads * FLAGS_ops_per_thread / elapsed_secs);
|
||||
printf("Complete in %.3f s; Rough parallel ops/sec = %u\n", elapsed_secs,
|
||||
ops_per_sec);
|
||||
|
||||
// Total time in each thread (more accurate throughput measure)
|
||||
elapsed_secs = 0;
|
||||
for (uint32_t i = 0; i < FLAGS_threads; i++) {
|
||||
elapsed_secs += threads[i]->duration_us * 1e-6;
|
||||
}
|
||||
ops_per_sec = static_cast<uint32_t>(1.0 * FLAGS_threads *
|
||||
FLAGS_ops_per_thread / elapsed_secs);
|
||||
printf("Thread ops/sec = %u\n", ops_per_sec);
|
||||
|
||||
printf("\nOperation latency (ns):\n");
|
||||
HistogramImpl combined;
|
||||
for (uint32_t i = 0; i < FLAGS_threads; i++) {
|
||||
combined.Merge(threads[i]->latency_ns_hist);
|
||||
}
|
||||
printf("%s", combined.ToString().c_str());
|
||||
|
||||
if (FLAGS_gather_stats) {
|
||||
printf("\nGather stats latency (us):\n");
|
||||
printf("%s", stats_hist.ToString().c_str());
|
||||
}
|
||||
|
||||
printf("\n%s", stats_report.c_str());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<Cache> cache_;
|
||||
const uint64_t max_key_;
|
||||
// Cumulative thresholds in the space of a random uint64_t
|
||||
const uint64_t lookup_insert_threshold_;
|
||||
const uint64_t insert_threshold_;
|
||||
const uint64_t lookup_threshold_;
|
||||
const uint64_t erase_threshold_;
|
||||
const bool skewed_;
|
||||
int max_log_;
|
||||
|
||||
// A benchmark version of gathering stats on an active block cache by
|
||||
// iterating over it. The primary purpose is to measure the impact of
|
||||
// gathering stats with ApplyToAllEntries on throughput- and
|
||||
// latency-sensitive Cache users. Performance of stats gathering is
|
||||
// also reported. The last set of gathered stats is also reported, for
|
||||
// manual sanity checking for logical errors or other unexpected
|
||||
// behavior of cache_bench or the underlying Cache.
|
||||
static void StatsBody(SharedState* shared, HistogramImpl* stats_hist,
|
||||
std::string* stats_report) {
|
||||
if (!FLAGS_gather_stats) {
|
||||
return;
|
||||
}
|
||||
const auto clock = SystemClock::Default().get();
|
||||
uint64_t total_key_size = 0;
|
||||
uint64_t total_charge = 0;
|
||||
uint64_t total_entry_count = 0;
|
||||
std::set<Cache::DeleterFn> deleters;
|
||||
StopWatchNano timer(clock);
|
||||
|
||||
for (;;) {
|
||||
uint64_t time;
|
||||
time = clock->NowMicros();
|
||||
uint64_t deadline = time + uint64_t{FLAGS_gather_stats_sleep_ms} * 1000;
|
||||
|
||||
{
|
||||
MutexLock l(shared->GetMutex());
|
||||
for (;;) {
|
||||
if (shared->AllDone()) {
|
||||
std::ostringstream ostr;
|
||||
ostr << "Most recent cache entry stats:\n"
|
||||
<< "Number of entries: " << total_entry_count << "\n"
|
||||
<< "Total charge: " << BytesToHumanString(total_charge) << "\n"
|
||||
<< "Average key size: "
|
||||
<< (1.0 * total_key_size / total_entry_count) << "\n"
|
||||
<< "Average charge: "
|
||||
<< BytesToHumanString(static_cast<uint64_t>(
|
||||
1.0 * total_charge / total_entry_count))
|
||||
<< "\n"
|
||||
<< "Unique deleters: " << deleters.size() << "\n";
|
||||
*stats_report = ostr.str();
|
||||
return;
|
||||
}
|
||||
if (clock->NowMicros() >= deadline) {
|
||||
break;
|
||||
}
|
||||
uint64_t diff = deadline - std::min(clock->NowMicros(), deadline);
|
||||
shared->GetCondVar()->TimedWait(diff + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// Now gather stats, outside of mutex
|
||||
total_key_size = 0;
|
||||
total_charge = 0;
|
||||
total_entry_count = 0;
|
||||
deleters.clear();
|
||||
auto fn = [&](const Slice& key, void* /*value*/, size_t charge,
|
||||
Cache::DeleterFn deleter) {
|
||||
total_key_size += key.size();
|
||||
total_charge += charge;
|
||||
++total_entry_count;
|
||||
// Something slightly more expensive as in (future) stats by category
|
||||
deleters.insert(deleter);
|
||||
};
|
||||
timer.Start();
|
||||
Cache::ApplyToAllEntriesOptions opts;
|
||||
opts.average_entries_per_lock = FLAGS_gather_stats_entries_per_lock;
|
||||
shared->GetCacheBench()->cache_->ApplyToAllEntries(fn, opts);
|
||||
stats_hist->Add(timer.ElapsedNanos() / 1000);
|
||||
}
|
||||
}
|
||||
|
||||
static void ThreadBody(ThreadState* thread) {
|
||||
SharedState* shared = thread->shared;
|
||||
|
||||
{
|
||||
MutexLock l(shared->GetMutex());
|
||||
shared->IncInitialized();
|
||||
if (shared->AllInitialized()) {
|
||||
shared->GetCondVar()->SignalAll();
|
||||
}
|
||||
while (!shared->Started()) {
|
||||
shared->GetCondVar()->Wait();
|
||||
}
|
||||
}
|
||||
thread->shared->GetCacheBench()->OperateCache(thread);
|
||||
|
||||
{
|
||||
MutexLock l(shared->GetMutex());
|
||||
shared->IncDone();
|
||||
if (shared->AllDone()) {
|
||||
shared->GetCondVar()->SignalAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void OperateCache(ThreadState* thread) {
|
||||
// To use looked-up values
|
||||
uint64_t result = 0;
|
||||
// To hold handles for a non-trivial amount of time
|
||||
Cache::Handle* handle = nullptr;
|
||||
KeyGen gen;
|
||||
const auto clock = SystemClock::Default().get();
|
||||
uint64_t start_time = clock->NowMicros();
|
||||
StopWatchNano timer(clock);
|
||||
|
||||
for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
|
||||
timer.Start();
|
||||
Slice key = gen.GetRand(thread->rnd, max_key_, max_log_);
|
||||
uint64_t random_op = thread->rnd.Next();
|
||||
Cache::CreateCallback create_cb = [](const void* buf, size_t size,
|
||||
void** out_obj,
|
||||
size_t* charge) -> Status {
|
||||
*out_obj = reinterpret_cast<void*>(new char[size]);
|
||||
memcpy(*out_obj, buf, size);
|
||||
*charge = size;
|
||||
return Status::OK();
|
||||
};
|
||||
|
||||
if (random_op < lookup_insert_threshold_) {
|
||||
if (handle) {
|
||||
cache_->Release(handle);
|
||||
handle = nullptr;
|
||||
}
|
||||
// do lookup
|
||||
handle = cache_->Lookup(key, &helper2, create_cb, Cache::Priority::LOW,
|
||||
true);
|
||||
if (handle) {
|
||||
// do something with the data
|
||||
result += NPHash64(static_cast<char*>(cache_->Value(handle)),
|
||||
FLAGS_value_bytes);
|
||||
} else {
|
||||
// do insert
|
||||
cache_->Insert(key, createValue(thread->rnd), &helper2,
|
||||
FLAGS_value_bytes, &handle);
|
||||
}
|
||||
} else if (random_op < insert_threshold_) {
|
||||
if (handle) {
|
||||
cache_->Release(handle);
|
||||
handle = nullptr;
|
||||
}
|
||||
// do insert
|
||||
cache_->Insert(key, createValue(thread->rnd), &helper3,
|
||||
FLAGS_value_bytes, &handle);
|
||||
} else if (random_op < lookup_threshold_) {
|
||||
if (handle) {
|
||||
cache_->Release(handle);
|
||||
handle = nullptr;
|
||||
}
|
||||
// do lookup
|
||||
handle = cache_->Lookup(key, &helper2, create_cb, Cache::Priority::LOW,
|
||||
true);
|
||||
if (handle) {
|
||||
// do something with the data
|
||||
result += NPHash64(static_cast<char*>(cache_->Value(handle)),
|
||||
FLAGS_value_bytes);
|
||||
}
|
||||
} else if (random_op < erase_threshold_) {
|
||||
// do erase
|
||||
cache_->Erase(key);
|
||||
} else {
|
||||
// Should be extremely unlikely (noop)
|
||||
assert(random_op >= kHundredthUint64 * 100U);
|
||||
}
|
||||
thread->latency_ns_hist.Add(timer.ElapsedNanos());
|
||||
}
|
||||
if (handle) {
|
||||
cache_->Release(handle);
|
||||
handle = nullptr;
|
||||
}
|
||||
// Ensure computations on `result` are not optimized away.
|
||||
if (result == 1) {
|
||||
printf("You are extremely unlucky(2). Try again.\n");
|
||||
exit(1);
|
||||
}
|
||||
thread->duration_us = clock->NowMicros() - start_time;
|
||||
}
|
||||
|
||||
void PrintEnv() const {
|
||||
printf("RocksDB version : %d.%d\n", kMajorVersion, kMinorVersion);
|
||||
printf("Number of threads : %u\n", FLAGS_threads);
|
||||
printf("Ops per thread : %" PRIu64 "\n", FLAGS_ops_per_thread);
|
||||
printf("Cache size : %s\n",
|
||||
BytesToHumanString(FLAGS_cache_size).c_str());
|
||||
printf("Num shard bits : %u\n", FLAGS_num_shard_bits);
|
||||
printf("Max key : %" PRIu64 "\n", max_key_);
|
||||
printf("Resident ratio : %g\n", FLAGS_resident_ratio);
|
||||
printf("Skew degree : %u\n", FLAGS_skew);
|
||||
printf("Populate cache : %d\n", int{FLAGS_populate_cache});
|
||||
printf("Lookup+Insert pct : %u%%\n", FLAGS_lookup_insert_percent);
|
||||
printf("Insert percentage : %u%%\n", FLAGS_insert_percent);
|
||||
printf("Lookup percentage : %u%%\n", FLAGS_lookup_percent);
|
||||
printf("Erase percentage : %u%%\n", FLAGS_erase_percent);
|
||||
std::ostringstream stats;
|
||||
if (FLAGS_gather_stats) {
|
||||
stats << "enabled (" << FLAGS_gather_stats_sleep_ms << "ms, "
|
||||
<< FLAGS_gather_stats_entries_per_lock << "/lock)";
|
||||
} else {
|
||||
stats << "disabled";
|
||||
}
|
||||
printf("Gather stats : %s\n", stats.str().c_str());
|
||||
printf("----------------------------\n");
|
||||
}
|
||||
};
|
||||
|
||||
// cache_bench -stress_cache_key is an independent embedded tool for
|
||||
// estimating the probability of CacheKey collisions through simulation.
|
||||
// At a high level, it simulates generating SST files over many months,
|
||||
// keeping them in the DB and/or cache for some lifetime while staying
|
||||
// under resource caps, and checking for any cache key collisions that
|
||||
// arise among the set of live files. For efficient simulation, we make
|
||||
// some simplifying "pessimistic" assumptions (that only increase the
|
||||
// chance of the simulation reporting a collision relative to the chance
|
||||
// of collision in practice):
|
||||
// * Every generated file has a cache entry for every byte offset in the
|
||||
// file (contiguous range of cache keys)
|
||||
// * All of every file is cached for its entire lifetime. (Here "lifetime"
|
||||
// is technically the union of DB and Cache lifetime, though we only
|
||||
// model a generous DB lifetime, where space usage is always maximized.
|
||||
// In a effective Cache, lifetime in cache can only substantially exceed
|
||||
// lifetime in DB if there is little cache activity; cache activity is
|
||||
// required to hit cache key collisions.)
|
||||
//
|
||||
// It would be possible to track an exact set of cache key ranges for the
|
||||
// set of live files, but we would have no hope of observing collisions
|
||||
// (overlap in live files) in our simulation. We need to employ some way
|
||||
// of amplifying collision probability that allows us to predict the real
|
||||
// collision probability by extrapolation from observed collisions. Our
|
||||
// basic approach is to reduce each cache key range down to some smaller
|
||||
// number of bits, and limiting to bits that are shared over the whole
|
||||
// range. Now we can observe collisions using a set of smaller stripped-down
|
||||
// (reduced) cache keys. Let's do some case analysis to understand why this
|
||||
// works:
|
||||
// * No collision in reduced key - because the reduction is a pure function
|
||||
// this implies no collision in the full keys
|
||||
// * Collision detected between two reduced keys - either
|
||||
// * The reduction has dropped some structured uniqueness info (from one of
|
||||
// session counter or file number; file offsets are never materialized here).
|
||||
// This can only artificially inflate the observed and extrapolated collision
|
||||
// probabilities. We only have to worry about this in designing the reduction.
|
||||
// * The reduction has preserved all the structured uniqueness in the cache
|
||||
// key, which means either
|
||||
// * REJECTED: We have a uniqueness bug in generating cache keys, where
|
||||
// structured uniqueness info should have been different but isn't. In such a
|
||||
// case, increasing by 1 the number of bits kept after reduction would not
|
||||
// reduce observed probabilities by half. (In our observations, the
|
||||
// probabilities are reduced approximately by half.)
|
||||
// * ACCEPTED: The lost unstructured uniqueness in the key determines the
|
||||
// probability that an observed collision would imply an overlap in ranges.
|
||||
// In short, dropping n bits from key would increase collision probability by
|
||||
// 2**n, assuming those n bits have full entropy in unstructured uniqueness.
|
||||
//
|
||||
// But we also have to account for the key ranges based on file size. If file
|
||||
// sizes are roughly 2**b offsets, using XOR in 128-bit cache keys for
|
||||
// "ranges", we know from other simulations (see
|
||||
// https://github.com/pdillinger/unique_id/) that that's roughly equivalent to
|
||||
// (less than 2x higher collision probability) using a cache key of size
|
||||
// 128 - b bits for the whole file. (This is the only place we make an
|
||||
// "optimistic" assumption, which is more than offset by the real
|
||||
// implementation stripping off 2 lower bits from block byte offsets for cache
|
||||
// keys. The simulation assumes byte offsets, which is net pessimistic.)
|
||||
//
|
||||
// So to accept the extrapolation as valid, we need to be confident that all
|
||||
// "lost" bits, excluding those covered by file offset, are full entropy.
|
||||
// Recall that we have assumed (verifiably, safely) that other structured data
|
||||
// (file number and session counter) are kept, not lost. Based on the
|
||||
// implementation comments for OffsetableCacheKey, the only potential hole here
|
||||
// is that we only have ~103 bits of entropy in "all new" session IDs, and in
|
||||
// extreme cases, there might be only 1 DB ID. However, because the upper ~39
|
||||
// bits of session ID are hashed, the combination of file number and file
|
||||
// offset only has to add to 25 bits (or more) to ensure full entropy in
|
||||
// unstructured uniqueness lost in the reduction. Typical file size of 32MB
|
||||
// suffices (at least for simulation purposes where we assume each file offset
|
||||
// occupies a cache key).
|
||||
//
|
||||
// Example results in comments on OffsetableCacheKey.
|
||||
class StressCacheKey {
|
||||
public:
|
||||
void Run() {
|
||||
if (FLAGS_sck_footer_unique_id) {
|
||||
// Proposed footer unique IDs are DB-independent and session-independent
|
||||
// (but process-dependent) which is most easily simulated here by
|
||||
// assuming 1 DB and (later below) no session resets without process
|
||||
// reset.
|
||||
FLAGS_sck_db_count = 1;
|
||||
}
|
||||
|
||||
// Describe the simulated workload
|
||||
uint64_t mb_per_day =
|
||||
uint64_t{FLAGS_sck_files_per_day} * FLAGS_sck_file_size_mb;
|
||||
printf("Total cache or DBs size: %gTiB Writing %g MiB/s or %gTiB/day\n",
|
||||
FLAGS_sck_file_size_mb / 1024.0 / 1024.0 *
|
||||
std::pow(2.0, FLAGS_sck_table_bits),
|
||||
mb_per_day / 86400.0, mb_per_day / 1024.0 / 1024.0);
|
||||
// For extrapolating probability of any collisions from a number of
|
||||
// observed collisions
|
||||
multiplier_ = std::pow(2.0, 128 - FLAGS_sck_keep_bits) /
|
||||
(FLAGS_sck_file_size_mb * 1024.0 * 1024.0);
|
||||
printf(
|
||||
"Multiply by %g to correct for simulation losses (but still assume "
|
||||
"whole file cached)\n",
|
||||
multiplier_);
|
||||
restart_nfiles_ = FLAGS_sck_files_per_day / FLAGS_sck_restarts_per_day;
|
||||
double without_ejection =
|
||||
std::pow(1.414214, FLAGS_sck_keep_bits) / FLAGS_sck_files_per_day;
|
||||
// This should be a lower bound for -sck_randomize, usually a terribly
|
||||
// rough lower bound.
|
||||
// If observation is worse than this, then something has gone wrong.
|
||||
printf(
|
||||
"Without ejection, expect random collision after %g days (%g "
|
||||
"corrected)\n",
|
||||
without_ejection, without_ejection * multiplier_);
|
||||
double with_full_table =
|
||||
std::pow(2.0, FLAGS_sck_keep_bits - FLAGS_sck_table_bits) /
|
||||
FLAGS_sck_files_per_day;
|
||||
// This is an alternate lower bound for -sck_randomize, usually pretty
|
||||
// accurate. Our cache keys should usually perform "better than random"
|
||||
// but always no worse. (If observation is substantially worse than this,
|
||||
// then something has gone wrong.)
|
||||
printf(
|
||||
"With ejection and full table, expect random collision after %g "
|
||||
"days (%g corrected)\n",
|
||||
with_full_table, with_full_table * multiplier_);
|
||||
collisions_ = 0;
|
||||
|
||||
// Run until sufficient number of observed collisions.
|
||||
for (int i = 1; collisions_ < FLAGS_sck_min_collision; i++) {
|
||||
RunOnce();
|
||||
if (collisions_ == 0) {
|
||||
printf(
|
||||
"No collisions after %d x %u days "
|
||||
" \n",
|
||||
i, FLAGS_sck_days_per_run);
|
||||
} else {
|
||||
double est = 1.0 * i * FLAGS_sck_days_per_run / collisions_;
|
||||
printf("%" PRIu64
|
||||
" collisions after %d x %u days, est %g days between (%g "
|
||||
"corrected) \n",
|
||||
collisions_, i, FLAGS_sck_days_per_run, est, est * multiplier_);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RunOnce() {
|
||||
// Re-initialized simulated state
|
||||
const size_t db_count = FLAGS_sck_db_count;
|
||||
dbs_.reset(new TableProperties[db_count]{});
|
||||
const size_t table_mask = (size_t{1} << FLAGS_sck_table_bits) - 1;
|
||||
table_.reset(new uint64_t[table_mask + 1]{});
|
||||
if (FLAGS_sck_keep_bits > 64) {
|
||||
FLAGS_sck_keep_bits = 64;
|
||||
}
|
||||
|
||||
// Details of which bits are dropped in reduction
|
||||
uint32_t shift_away = 64 - FLAGS_sck_keep_bits;
|
||||
// Shift away fewer potential file number bits (b) than potential
|
||||
// session counter bits (a).
|
||||
uint32_t shift_away_b = shift_away / 3;
|
||||
uint32_t shift_away_a = shift_away - shift_away_b;
|
||||
|
||||
process_count_ = 0;
|
||||
session_count_ = 0;
|
||||
ResetProcess();
|
||||
|
||||
Random64 r{std::random_device{}()};
|
||||
|
||||
uint64_t max_file_count =
|
||||
uint64_t{FLAGS_sck_files_per_day} * FLAGS_sck_days_per_run;
|
||||
uint64_t file_size = FLAGS_sck_file_size_mb * uint64_t{1024} * 1024U;
|
||||
uint32_t report_count = 0;
|
||||
uint32_t collisions_this_run = 0;
|
||||
size_t db_i = 0;
|
||||
|
||||
for (uint64_t file_count = 1; file_count <= max_file_count;
|
||||
++file_count, ++db_i) {
|
||||
// Round-robin through DBs (this faster than %)
|
||||
if (db_i >= db_count) {
|
||||
db_i = 0;
|
||||
}
|
||||
// Any other periodic actions before simulating next file
|
||||
if (!FLAGS_sck_footer_unique_id && r.OneIn(FLAGS_sck_reopen_nfiles)) {
|
||||
ResetSession(db_i);
|
||||
} else if (r.OneIn(restart_nfiles_)) {
|
||||
ResetProcess();
|
||||
}
|
||||
// Simulate next file
|
||||
OffsetableCacheKey ock;
|
||||
dbs_[db_i].orig_file_number += 1;
|
||||
// skip some file numbers for other file kinds, except in footer unique
|
||||
// ID, orig_file_number here tracks process-wide generated SST file
|
||||
// count.
|
||||
if (!FLAGS_sck_footer_unique_id) {
|
||||
dbs_[db_i].orig_file_number += (r.Next() & 3);
|
||||
}
|
||||
bool is_stable;
|
||||
BlockBasedTable::SetupBaseCacheKey(&dbs_[db_i], /* ignored */ "",
|
||||
/* ignored */ 42, file_size, &ock,
|
||||
&is_stable);
|
||||
assert(is_stable);
|
||||
// Get a representative cache key, which later we analytically generalize
|
||||
// to a range.
|
||||
CacheKey ck = ock.WithOffset(0);
|
||||
uint64_t reduced_key;
|
||||
if (FLAGS_sck_randomize) {
|
||||
reduced_key = GetSliceHash64(ck.AsSlice()) >> shift_away;
|
||||
} else if (FLAGS_sck_footer_unique_id) {
|
||||
// Special case: keep only file number, not session counter
|
||||
uint32_t a = DecodeFixed32(ck.AsSlice().data() + 4) >> shift_away_a;
|
||||
uint32_t b = DecodeFixed32(ck.AsSlice().data() + 12) >> shift_away_b;
|
||||
reduced_key = (uint64_t{a} << 32) + b;
|
||||
} else {
|
||||
// Try to keep file number and session counter (shift away other bits)
|
||||
uint32_t a = DecodeFixed32(ck.AsSlice().data()) << shift_away_a;
|
||||
uint32_t b = DecodeFixed32(ck.AsSlice().data() + 12) >> shift_away_b;
|
||||
reduced_key = (uint64_t{a} << 32) + b;
|
||||
}
|
||||
if (reduced_key == 0) {
|
||||
// Unlikely, but we need to exclude tracking this value because we
|
||||
// use it to mean "empty" in table. This case is OK as long as we
|
||||
// don't hit it often.
|
||||
printf("Hit Zero! \n");
|
||||
file_count--;
|
||||
continue;
|
||||
}
|
||||
uint64_t h =
|
||||
NPHash64(reinterpret_cast<char*>(&reduced_key), sizeof(reduced_key));
|
||||
// Skew expected lifetimes, for high variance (super-Poisson) variance
|
||||
// in actual lifetimes.
|
||||
size_t pos =
|
||||
std::min(Lower32of64(h) & table_mask, Upper32of64(h) & table_mask);
|
||||
if (table_[pos] == reduced_key) {
|
||||
collisions_this_run++;
|
||||
// Our goal is to predict probability of no collisions, not expected
|
||||
// number of collisions. To make the distinction, we have to get rid
|
||||
// of observing correlated collisions, which this takes care of:
|
||||
ResetProcess();
|
||||
} else {
|
||||
// Replace (end of lifetime for file that was in this slot)
|
||||
table_[pos] = reduced_key;
|
||||
}
|
||||
|
||||
if (++report_count == FLAGS_sck_files_per_day) {
|
||||
report_count = 0;
|
||||
// Estimate fill %
|
||||
size_t incr = table_mask / 1000;
|
||||
size_t sampled_count = 0;
|
||||
for (size_t i = 0; i <= table_mask; i += incr) {
|
||||
if (table_[i] != 0) {
|
||||
sampled_count++;
|
||||
}
|
||||
}
|
||||
// Report
|
||||
printf(
|
||||
"%" PRIu64 " days, %" PRIu64 " proc, %" PRIu64
|
||||
" sess, %u coll, occ %g%%, ejected %g%% \r",
|
||||
file_count / FLAGS_sck_files_per_day, process_count_,
|
||||
session_count_, collisions_this_run, 100.0 * sampled_count / 1000.0,
|
||||
100.0 * (1.0 - sampled_count / 1000.0 * table_mask / file_count));
|
||||
fflush(stdout);
|
||||
}
|
||||
}
|
||||
collisions_ += collisions_this_run;
|
||||
}
|
||||
|
||||
void ResetSession(size_t i) {
|
||||
dbs_[i].db_session_id = DBImpl::GenerateDbSessionId(nullptr);
|
||||
session_count_++;
|
||||
}
|
||||
|
||||
void ResetProcess() {
|
||||
process_count_++;
|
||||
DBImpl::TEST_ResetDbSessionIdGen();
|
||||
for (size_t i = 0; i < FLAGS_sck_db_count; ++i) {
|
||||
ResetSession(i);
|
||||
}
|
||||
if (FLAGS_sck_footer_unique_id) {
|
||||
// For footer unique ID, this tracks process-wide generated SST file
|
||||
// count.
|
||||
dbs_[0].orig_file_number = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
// Use db_session_id and orig_file_number from TableProperties
|
||||
std::unique_ptr<TableProperties[]> dbs_;
|
||||
std::unique_ptr<uint64_t[]> table_;
|
||||
uint64_t process_count_ = 0;
|
||||
uint64_t session_count_ = 0;
|
||||
uint64_t collisions_ = 0;
|
||||
uint32_t restart_nfiles_ = 0;
|
||||
double multiplier_ = 0.0;
|
||||
};
|
||||
|
||||
int cache_bench_tool(int argc, char** argv) {
|
||||
ParseCommandLineFlags(&argc, &argv, true);
|
||||
|
||||
if (FLAGS_stress_cache_key) {
|
||||
// Alternate tool
|
||||
StressCacheKey().Run();
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (FLAGS_threads <= 0) {
|
||||
fprintf(stderr, "threads number <= 0\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ROCKSDB_NAMESPACE::CacheBench bench;
|
||||
if (FLAGS_populate_cache) {
|
||||
bench.PopulateCache();
|
||||
printf("Population complete\n");
|
||||
printf("----------------------------\n");
|
||||
}
|
||||
if (bench.Run()) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
#endif // GFLAGS
|
128
cache/cache_entry_roles.cc
vendored
128
cache/cache_entry_roles.cc
vendored
@ -1,128 +0,0 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "cache/cache_entry_roles.h"
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include "port/lang.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToCamelString{{
|
||||
"DataBlock",
|
||||
"FilterBlock",
|
||||
"FilterMetaBlock",
|
||||
"DeprecatedFilterBlock",
|
||||
"IndexBlock",
|
||||
"OtherBlock",
|
||||
"WriteBuffer",
|
||||
"CompressionDictionaryBuildingBuffer",
|
||||
"FilterConstruction",
|
||||
"BlockBasedTableReader",
|
||||
"Misc",
|
||||
}};
|
||||
|
||||
std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToHyphenString{{
|
||||
"data-block",
|
||||
"filter-block",
|
||||
"filter-meta-block",
|
||||
"deprecated-filter-block",
|
||||
"index-block",
|
||||
"other-block",
|
||||
"write-buffer",
|
||||
"compression-dictionary-building-buffer",
|
||||
"filter-construction",
|
||||
"block-based-table-reader",
|
||||
"misc",
|
||||
}};
|
||||
|
||||
const std::string& GetCacheEntryRoleName(CacheEntryRole role) {
|
||||
return kCacheEntryRoleToHyphenString[static_cast<size_t>(role)];
|
||||
}
|
||||
|
||||
const std::string& BlockCacheEntryStatsMapKeys::CacheId() {
|
||||
static const std::string kCacheId = "id";
|
||||
return kCacheId;
|
||||
}
|
||||
|
||||
const std::string& BlockCacheEntryStatsMapKeys::CacheCapacityBytes() {
|
||||
static const std::string kCacheCapacityBytes = "capacity";
|
||||
return kCacheCapacityBytes;
|
||||
}
|
||||
|
||||
const std::string&
|
||||
BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds() {
|
||||
static const std::string kLastCollectionDurationSeconds =
|
||||
"secs_for_last_collection";
|
||||
return kLastCollectionDurationSeconds;
|
||||
}
|
||||
|
||||
const std::string& BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds() {
|
||||
static const std::string kLastCollectionAgeSeconds =
|
||||
"secs_since_last_collection";
|
||||
return kLastCollectionAgeSeconds;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
std::string GetPrefixedCacheEntryRoleName(const std::string& prefix,
|
||||
CacheEntryRole role) {
|
||||
const std::string& role_name = GetCacheEntryRoleName(role);
|
||||
std::string prefixed_role_name;
|
||||
prefixed_role_name.reserve(prefix.size() + role_name.size());
|
||||
prefixed_role_name.append(prefix);
|
||||
prefixed_role_name.append(role_name);
|
||||
return prefixed_role_name;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
std::string BlockCacheEntryStatsMapKeys::EntryCount(CacheEntryRole role) {
|
||||
const static std::string kPrefix = "count.";
|
||||
return GetPrefixedCacheEntryRoleName(kPrefix, role);
|
||||
}
|
||||
|
||||
std::string BlockCacheEntryStatsMapKeys::UsedBytes(CacheEntryRole role) {
|
||||
const static std::string kPrefix = "bytes.";
|
||||
return GetPrefixedCacheEntryRoleName(kPrefix, role);
|
||||
}
|
||||
|
||||
std::string BlockCacheEntryStatsMapKeys::UsedPercent(CacheEntryRole role) {
|
||||
const static std::string kPrefix = "percent.";
|
||||
return GetPrefixedCacheEntryRoleName(kPrefix, role);
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct Registry {
|
||||
std::mutex mutex;
|
||||
UnorderedMap<Cache::DeleterFn, CacheEntryRole> role_map;
|
||||
void Register(Cache::DeleterFn fn, CacheEntryRole role) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
role_map[fn] = role;
|
||||
}
|
||||
UnorderedMap<Cache::DeleterFn, CacheEntryRole> Copy() {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
return role_map;
|
||||
}
|
||||
};
|
||||
|
||||
Registry& GetRegistry() {
|
||||
STATIC_AVOID_DESTRUCTION(Registry, registry);
|
||||
return registry;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void RegisterCacheDeleterRole(Cache::DeleterFn fn, CacheEntryRole role) {
|
||||
GetRegistry().Register(fn, role);
|
||||
}
|
||||
|
||||
UnorderedMap<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap() {
|
||||
return GetRegistry().Copy();
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
103
cache/cache_entry_roles.h
vendored
103
cache/cache_entry_roles.h
vendored
@ -1,103 +0,0 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
|
||||
#include "rocksdb/cache.h"
|
||||
#include "util/hash_containers.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
extern std::array<std::string, kNumCacheEntryRoles>
|
||||
kCacheEntryRoleToCamelString;
|
||||
extern std::array<std::string, kNumCacheEntryRoles>
|
||||
kCacheEntryRoleToHyphenString;
|
||||
|
||||
// To associate cache entries with their role, we use a hack on the
|
||||
// existing Cache interface. Because the deleter of an entry can authenticate
|
||||
// the code origin of an entry, we can elaborate the choice of deleter to
|
||||
// also encode role information, without inferring false role information
|
||||
// from entries not choosing to encode a role.
|
||||
//
|
||||
// The rest of this file is for handling mappings between deleters and
|
||||
// roles.
|
||||
|
||||
// To infer a role from a deleter, the deleter must be registered. This
|
||||
// can be done "manually" with this function. This function is thread-safe,
|
||||
// and the registration mappings go into private but static storage. (Note
|
||||
// that DeleterFn is a function pointer, not std::function. Registrations
|
||||
// should not be too many.)
|
||||
void RegisterCacheDeleterRole(Cache::DeleterFn fn, CacheEntryRole role);
|
||||
|
||||
// Gets a copy of the registered deleter -> role mappings. This is the only
|
||||
// function for reading the mappings made with RegisterCacheDeleterRole.
|
||||
// Why only this interface for reading?
|
||||
// * This function has to be thread safe, which could incur substantial
|
||||
// overhead. We should not pay this overhead for every deleter look-up.
|
||||
// * This is suitable for preparing for batch operations, like with
|
||||
// CacheEntryStatsCollector.
|
||||
// * The number of mappings should be sufficiently small (dozens).
|
||||
UnorderedMap<Cache::DeleterFn, CacheEntryRole> CopyCacheDeleterRoleMap();
|
||||
|
||||
// ************************************************************** //
|
||||
// An automatic registration infrastructure. This enables code
|
||||
// to simply ask for a deleter associated with a particular type
|
||||
// and role, and registration is automatic. In a sense, this is
|
||||
// a small dependency injection infrastructure, because linking
|
||||
// in new deleter instantiations is essentially sufficient for
|
||||
// making stats collection (using CopyCacheDeleterRoleMap) aware
|
||||
// of them.
|
||||
|
||||
namespace cache_entry_roles_detail {
|
||||
|
||||
template <typename T, CacheEntryRole R>
|
||||
struct RegisteredDeleter {
|
||||
RegisteredDeleter() { RegisterCacheDeleterRole(Delete, R); }
|
||||
|
||||
// These have global linkage to help ensure compiler optimizations do not
|
||||
// break uniqueness for each <T,R>
|
||||
static void Delete(const Slice& /* key */, void* value) {
|
||||
// Supports T == Something[], unlike delete operator
|
||||
std::default_delete<T>()(
|
||||
static_cast<typename std::remove_extent<T>::type*>(value));
|
||||
}
|
||||
};
|
||||
|
||||
template <CacheEntryRole R>
|
||||
struct RegisteredNoopDeleter {
|
||||
RegisteredNoopDeleter() { RegisterCacheDeleterRole(Delete, R); }
|
||||
|
||||
static void Delete(const Slice& /* key */, void* /* value */) {
|
||||
// Here was `assert(value == nullptr);` but we can also put pointers
|
||||
// to static data in Cache, for testing at least.
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace cache_entry_roles_detail
|
||||
|
||||
// Get an automatically registered deleter for value type T and role R.
|
||||
// Based on C++ semantics, registration is invoked exactly once in a
|
||||
// thread-safe way on first call to this function, for each <T, R>.
|
||||
template <typename T, CacheEntryRole R>
|
||||
Cache::DeleterFn GetCacheEntryDeleterForRole() {
|
||||
static cache_entry_roles_detail::RegisteredDeleter<T, R> reg;
|
||||
return reg.Delete;
|
||||
}
|
||||
|
||||
// Get an automatically registered no-op deleter (value should be nullptr)
|
||||
// and associated with role R. This is used for Cache "reservation" entries
|
||||
// such as for WriteBufferManager.
|
||||
template <CacheEntryRole R>
|
||||
Cache::DeleterFn GetNoopDeleterForRole() {
|
||||
static cache_entry_roles_detail::RegisteredNoopDeleter<R> reg;
|
||||
return reg.Delete;
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
183
cache/cache_entry_stats.h
vendored
183
cache/cache_entry_stats.h
vendored
@ -1,183 +0,0 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#include "cache/cache_helpers.h"
|
||||
#include "cache/cache_key.h"
|
||||
#include "port/lang.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/status.h"
|
||||
#include "rocksdb/system_clock.h"
|
||||
#include "test_util/sync_point.h"
|
||||
#include "util/coding_lean.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
// A generic helper object for gathering stats about cache entries by
|
||||
// iterating over them with ApplyToAllEntries. This class essentially
|
||||
// solves the problem of slowing down a Cache with too many stats
|
||||
// collectors that could be sharing stat results, such as from multiple
|
||||
// column families or multiple DBs sharing a Cache. We employ a few
|
||||
// mitigations:
|
||||
// * Only one collector for a particular kind of Stats is alive
|
||||
// for each Cache. This is guaranteed using the Cache itself to hold
|
||||
// the collector.
|
||||
// * A mutex ensures only one thread is gathering stats for this
|
||||
// collector.
|
||||
// * The most recent gathered stats are saved and simply copied to
|
||||
// satisfy requests within a time window (default: 3 minutes) of
|
||||
// completion of the most recent stat gathering.
|
||||
//
|
||||
// Template parameter Stats must be copyable and trivially constructable,
|
||||
// as well as...
|
||||
// concept Stats {
|
||||
// // Notification before applying callback to all entries
|
||||
// void BeginCollection(Cache*, SystemClock*, uint64_t start_time_micros);
|
||||
// // Get the callback to apply to all entries. `callback`
|
||||
// // type must be compatible with Cache::ApplyToAllEntries
|
||||
// callback GetEntryCallback();
|
||||
// // Notification after applying callback to all entries
|
||||
// void EndCollection(Cache*, SystemClock*, uint64_t end_time_micros);
|
||||
// // Notification that a collection was skipped because of
|
||||
// // sufficiently recent saved results.
|
||||
// void SkippedCollection();
|
||||
// }
|
||||
template <class Stats>
|
||||
class CacheEntryStatsCollector {
|
||||
public:
|
||||
// Gather and save stats if saved stats are too old. (Use GetStats() to
|
||||
// read saved stats.)
|
||||
//
|
||||
// Maximum allowed age for a "hit" on saved results is determined by the
|
||||
// two interval parameters. Both set to 0 forces a re-scan. For example
|
||||
// with min_interval_seconds=300 and min_interval_factor=100, if the last
|
||||
// scan took 10s, we would only rescan ("miss") if the age in seconds of
|
||||
// the saved results is > max(300, 100*10).
|
||||
// Justification: scans can vary wildly in duration, e.g. from 0.02 sec
|
||||
// to as much as 20 seconds, so we want to be able to cap the absolute
|
||||
// and relative frequency of scans.
|
||||
void CollectStats(int min_interval_seconds, int min_interval_factor) {
|
||||
// Waits for any pending reader or writer (collector)
|
||||
std::lock_guard<std::mutex> lock(working_mutex_);
|
||||
|
||||
uint64_t max_age_micros =
|
||||
static_cast<uint64_t>(std::max(min_interval_seconds, 0)) * 1000000U;
|
||||
|
||||
if (last_end_time_micros_ > last_start_time_micros_ &&
|
||||
min_interval_factor > 0) {
|
||||
max_age_micros = std::max(
|
||||
max_age_micros, min_interval_factor * (last_end_time_micros_ -
|
||||
last_start_time_micros_));
|
||||
}
|
||||
|
||||
uint64_t start_time_micros = clock_->NowMicros();
|
||||
if ((start_time_micros - last_end_time_micros_) > max_age_micros) {
|
||||
last_start_time_micros_ = start_time_micros;
|
||||
working_stats_.BeginCollection(cache_, clock_, start_time_micros);
|
||||
|
||||
cache_->ApplyToAllEntries(working_stats_.GetEntryCallback(), {});
|
||||
TEST_SYNC_POINT_CALLBACK(
|
||||
"CacheEntryStatsCollector::GetStats:AfterApplyToAllEntries", nullptr);
|
||||
|
||||
uint64_t end_time_micros = clock_->NowMicros();
|
||||
last_end_time_micros_ = end_time_micros;
|
||||
working_stats_.EndCollection(cache_, clock_, end_time_micros);
|
||||
} else {
|
||||
working_stats_.SkippedCollection();
|
||||
}
|
||||
|
||||
// Save so that we don't need to wait for an outstanding collection in
|
||||
// order to make of copy of the last saved stats
|
||||
std::lock_guard<std::mutex> lock2(saved_mutex_);
|
||||
saved_stats_ = working_stats_;
|
||||
}
|
||||
|
||||
// Gets saved stats, regardless of age
|
||||
void GetStats(Stats *stats) {
|
||||
std::lock_guard<std::mutex> lock(saved_mutex_);
|
||||
*stats = saved_stats_;
|
||||
}
|
||||
|
||||
Cache *GetCache() const { return cache_; }
|
||||
|
||||
// Gets or creates a shared instance of CacheEntryStatsCollector in the
|
||||
// cache itself, and saves into `ptr`. This shared_ptr will hold the
|
||||
// entry in cache until all refs are destroyed.
|
||||
static Status GetShared(Cache *cache, SystemClock *clock,
|
||||
std::shared_ptr<CacheEntryStatsCollector> *ptr) {
|
||||
const Slice &cache_key = GetCacheKey();
|
||||
|
||||
Cache::Handle *h = cache->Lookup(cache_key);
|
||||
if (h == nullptr) {
|
||||
// Not yet in cache, but Cache doesn't provide a built-in way to
|
||||
// avoid racing insert. So we double-check under a shared mutex,
|
||||
// inspired by TableCache.
|
||||
STATIC_AVOID_DESTRUCTION(std::mutex, static_mutex);
|
||||
std::lock_guard<std::mutex> lock(static_mutex);
|
||||
|
||||
h = cache->Lookup(cache_key);
|
||||
if (h == nullptr) {
|
||||
auto new_ptr = new CacheEntryStatsCollector(cache, clock);
|
||||
// TODO: non-zero charge causes some tests that count block cache
|
||||
// usage to go flaky. Fix the problem somehow so we can use an
|
||||
// accurate charge.
|
||||
size_t charge = 0;
|
||||
Status s = cache->Insert(cache_key, new_ptr, charge, Deleter, &h,
|
||||
Cache::Priority::HIGH);
|
||||
if (!s.ok()) {
|
||||
assert(h == nullptr);
|
||||
delete new_ptr;
|
||||
return s;
|
||||
}
|
||||
}
|
||||
}
|
||||
// If we reach here, shared entry is in cache with handle `h`.
|
||||
assert(cache->GetDeleter(h) == Deleter);
|
||||
|
||||
// Build an aliasing shared_ptr that keeps `ptr` in cache while there
|
||||
// are references.
|
||||
*ptr = MakeSharedCacheHandleGuard<CacheEntryStatsCollector>(cache, h);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
private:
|
||||
explicit CacheEntryStatsCollector(Cache *cache, SystemClock *clock)
|
||||
: saved_stats_(),
|
||||
working_stats_(),
|
||||
last_start_time_micros_(0),
|
||||
last_end_time_micros_(/*pessimistic*/ 10000000),
|
||||
cache_(cache),
|
||||
clock_(clock) {}
|
||||
|
||||
static void Deleter(const Slice &, void *value) {
|
||||
delete static_cast<CacheEntryStatsCollector *>(value);
|
||||
}
|
||||
|
||||
static const Slice &GetCacheKey() {
|
||||
// For each template instantiation
|
||||
static CacheKey ckey = CacheKey::CreateUniqueForProcessLifetime();
|
||||
static Slice ckey_slice = ckey.AsSlice();
|
||||
return ckey_slice;
|
||||
}
|
||||
|
||||
std::mutex saved_mutex_;
|
||||
Stats saved_stats_;
|
||||
|
||||
std::mutex working_mutex_;
|
||||
Stats working_stats_;
|
||||
uint64_t last_start_time_micros_;
|
||||
uint64_t last_end_time_micros_;
|
||||
|
||||
Cache *const cache_;
|
||||
SystemClock *const clock_;
|
||||
};
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
125
cache/cache_helpers.h
vendored
125
cache/cache_helpers.h
vendored
@ -1,125 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cassert>
|
||||
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/rocksdb_namespace.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
// Returns the cached value given a cache handle.
|
||||
template <typename T>
|
||||
T* GetFromCacheHandle(Cache* cache, Cache::Handle* handle) {
|
||||
assert(cache);
|
||||
assert(handle);
|
||||
|
||||
return static_cast<T*>(cache->Value(handle));
|
||||
}
|
||||
|
||||
// Simple generic deleter for Cache (to be used with Cache::Insert).
|
||||
template <typename T>
|
||||
void DeleteCacheEntry(const Slice& /* key */, void* value) {
|
||||
delete static_cast<T*>(value);
|
||||
}
|
||||
|
||||
// Turns a T* into a Slice so it can be used as a key with Cache.
|
||||
template <typename T>
|
||||
Slice GetSlice(const T* t) {
|
||||
return Slice(reinterpret_cast<const char*>(t), sizeof(T));
|
||||
}
|
||||
|
||||
// Generic resource management object for cache handles that releases the handle
|
||||
// when destroyed. Has unique ownership of the handle, so copying it is not
|
||||
// allowed, while moving it transfers ownership.
|
||||
template <typename T>
|
||||
class CacheHandleGuard {
|
||||
public:
|
||||
CacheHandleGuard() = default;
|
||||
|
||||
CacheHandleGuard(Cache* cache, Cache::Handle* handle)
|
||||
: cache_(cache),
|
||||
handle_(handle),
|
||||
value_(GetFromCacheHandle<T>(cache, handle)) {
|
||||
assert(cache_ && handle_ && value_);
|
||||
}
|
||||
|
||||
CacheHandleGuard(const CacheHandleGuard&) = delete;
|
||||
CacheHandleGuard& operator=(const CacheHandleGuard&) = delete;
|
||||
|
||||
CacheHandleGuard(CacheHandleGuard&& rhs) noexcept
|
||||
: cache_(rhs.cache_), handle_(rhs.handle_), value_(rhs.value_) {
|
||||
assert((!cache_ && !handle_ && !value_) || (cache_ && handle_ && value_));
|
||||
|
||||
rhs.ResetFields();
|
||||
}
|
||||
|
||||
CacheHandleGuard& operator=(CacheHandleGuard&& rhs) noexcept {
|
||||
if (this == &rhs) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
ReleaseHandle();
|
||||
|
||||
cache_ = rhs.cache_;
|
||||
handle_ = rhs.handle_;
|
||||
value_ = rhs.value_;
|
||||
|
||||
assert((!cache_ && !handle_ && !value_) || (cache_ && handle_ && value_));
|
||||
|
||||
rhs.ResetFields();
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
~CacheHandleGuard() { ReleaseHandle(); }
|
||||
|
||||
bool IsEmpty() const { return !handle_; }
|
||||
|
||||
Cache* GetCache() const { return cache_; }
|
||||
Cache::Handle* GetCacheHandle() const { return handle_; }
|
||||
T* GetValue() const { return value_; }
|
||||
|
||||
void Reset() {
|
||||
ReleaseHandle();
|
||||
ResetFields();
|
||||
}
|
||||
|
||||
private:
|
||||
void ReleaseHandle() {
|
||||
if (IsEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(cache_);
|
||||
cache_->Release(handle_);
|
||||
}
|
||||
|
||||
void ResetFields() {
|
||||
cache_ = nullptr;
|
||||
handle_ = nullptr;
|
||||
value_ = nullptr;
|
||||
}
|
||||
|
||||
private:
|
||||
Cache* cache_ = nullptr;
|
||||
Cache::Handle* handle_ = nullptr;
|
||||
T* value_ = nullptr;
|
||||
};
|
||||
|
||||
// Build an aliasing shared_ptr that keeps `handle` in cache while there
|
||||
// are references, but the pointer is to the value for that cache entry,
|
||||
// which must be of type T. This is copyable, unlike CacheHandleGuard, but
|
||||
// does not provide access to caching details.
|
||||
template <typename T>
|
||||
std::shared_ptr<T> MakeSharedCacheHandleGuard(Cache* cache,
|
||||
Cache::Handle* handle) {
|
||||
auto wrapper = std::make_shared<CacheHandleGuard<T>>(cache, handle);
|
||||
return std::shared_ptr<T>(wrapper, static_cast<T*>(cache->Value(handle)));
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
344
cache/cache_key.cc
vendored
344
cache/cache_key.cc
vendored
@ -1,344 +0,0 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "cache/cache_key.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
|
||||
#include "rocksdb/cache.h"
|
||||
#include "table/unique_id_impl.h"
|
||||
#include "util/hash.h"
|
||||
#include "util/math.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
// Value space plan for CacheKey:
|
||||
//
|
||||
// session_etc64_ | offset_etc64_ | Only generated by
|
||||
// ---------------+---------------+------------------------------------------
|
||||
// 0 | 0 | Reserved for "empty" CacheKey()
|
||||
// 0 | > 0, < 1<<63 | CreateUniqueForCacheLifetime
|
||||
// 0 | >= 1<<63 | CreateUniqueForProcessLifetime
|
||||
// > 0 | any | OffsetableCacheKey.WithOffset
|
||||
|
||||
CacheKey CacheKey::CreateUniqueForCacheLifetime(Cache *cache) {
|
||||
// +1 so that we can reserve all zeros for "unset" cache key
|
||||
uint64_t id = cache->NewId() + 1;
|
||||
// Ensure we don't collide with CreateUniqueForProcessLifetime
|
||||
assert((id >> 63) == 0U);
|
||||
return CacheKey(0, id);
|
||||
}
|
||||
|
||||
CacheKey CacheKey::CreateUniqueForProcessLifetime() {
|
||||
// To avoid colliding with CreateUniqueForCacheLifetime, assuming
|
||||
// Cache::NewId counts up from zero, here we count down from UINT64_MAX.
|
||||
// If this ever becomes a point of contention, we could sub-divide the
|
||||
// space and use CoreLocalArray.
|
||||
static std::atomic<uint64_t> counter{UINT64_MAX};
|
||||
uint64_t id = counter.fetch_sub(1, std::memory_order_relaxed);
|
||||
// Ensure we don't collide with CreateUniqueForCacheLifetime
|
||||
assert((id >> 63) == 1U);
|
||||
return CacheKey(0, id);
|
||||
}
|
||||
|
||||
// Value plan for CacheKeys from OffsetableCacheKey, assuming that
|
||||
// db_session_ids are generated from a base_session_id and
|
||||
// session_id_counter (by SemiStructuredUniqueIdGen+EncodeSessionId
|
||||
// in DBImpl::GenerateDbSessionId):
|
||||
//
|
||||
// Conceptual inputs:
|
||||
// db_id (unstructured, from GenerateRawUniqueId or equiv)
|
||||
// * could be shared between cloned DBs but rare
|
||||
// * could be constant, if session id suffices
|
||||
// base_session_id (unstructured, from GenerateRawUniqueId)
|
||||
// session_id_counter (structured)
|
||||
// * usually much smaller than 2**24
|
||||
// file_number (structured)
|
||||
// * usually smaller than 2**24
|
||||
// offset_in_file (structured, might skip lots of values)
|
||||
// * usually smaller than 2**32
|
||||
// max_offset determines placement of file_number to prevent
|
||||
// overlapping with offset
|
||||
//
|
||||
// Outputs come from bitwise-xor of the constituent pieces, low bits on left:
|
||||
//
|
||||
// |------------------------- session_etc64 -------------------------|
|
||||
// | +++++++++++++++ base_session_id (lower 64 bits) +++++++++++++++ |
|
||||
// |-----------------------------------------------------------------|
|
||||
// | session_id_counter ...| |
|
||||
// |-----------------------------------------------------------------|
|
||||
// | | ... file_number |
|
||||
// | | overflow & meta |
|
||||
// |-----------------------------------------------------------------|
|
||||
//
|
||||
//
|
||||
// |------------------------- offset_etc64 --------------------------|
|
||||
// | hash of: ++++++++++++++++++++++++++++++++++++++++++++++++++++++ |
|
||||
// | * base_session_id (upper ~39 bits) |
|
||||
// | * db_id (~122 bits entropy) |
|
||||
// |-----------------------------------------------------------------|
|
||||
// | offset_in_file ............... | |
|
||||
// |-----------------------------------------------------------------|
|
||||
// | | file_number, 0-3 |
|
||||
// | | lower bytes |
|
||||
// |-----------------------------------------------------------------|
|
||||
//
|
||||
// Based on max_offset, a maximal number of bytes 0..3 is chosen for
|
||||
// including from lower bits of file_number in offset_etc64. The choice
|
||||
// is encoded in two bits of metadata going into session_etc64, though
|
||||
// the common case of 3 bytes is encoded as 0 so that session_etc64
|
||||
// is unmodified by file_number concerns in the common case.
|
||||
//
|
||||
// There is nothing preventing "file number overflow & meta" from meeting
|
||||
// and overlapping with session_id_counter, but reaching such a case requires
|
||||
// an intractable combination of large file offsets (thus at least some large
|
||||
// files), large file numbers (thus large number of files generated), and
|
||||
// large number of session IDs generated in a single process. A trillion each
|
||||
// (2**40) of session ids, offsets, and file numbers comes to 120 bits.
|
||||
// With two bits of metadata and byte granularity, this is on the verge of
|
||||
// overlap, but even in the overlap case, it doesn't seem likely that
|
||||
// a file from billions of files or session ids ago will still be live
|
||||
// or cached.
|
||||
//
|
||||
// In fact, if our SST files are all < 4TB (see
|
||||
// BlockBasedTable::kMaxFileSizeStandardEncoding), then SST files generated
|
||||
// in a single process are guaranteed to have unique cache keys, unless/until
|
||||
// number session ids * max file number = 2**86, e.g. 1 trillion DB::Open in
|
||||
// a single process and 64 trillion files generated. Even at that point, to
|
||||
// see a collision we would need a miraculous re-synchronization of session
|
||||
// id and file number, along with a live file or stale cache entry from
|
||||
// trillions of files ago.
|
||||
//
|
||||
// How https://github.com/pdillinger/unique_id applies here:
|
||||
// Every bit of output always includes "unstructured" uniqueness bits and
|
||||
// often combines with "structured" uniqueness bits. The "unstructured" bits
|
||||
// change infrequently: only when we cannot guarantee our state tracking for
|
||||
// "structured" uniqueness hasn't been cloned. Using a static
|
||||
// SemiStructuredUniqueIdGen for db_session_ids, this means we only get an
|
||||
// "all new" session id when a new process uses RocksDB. (Between processes,
|
||||
// we don't know if a DB or other persistent storage has been cloned. We
|
||||
// assume that if VM hot cloning is used, subsequently generated SST files
|
||||
// do not interact.) Within a process, only the session_lower of the
|
||||
// db_session_id changes incrementally ("structured" uniqueness).
|
||||
//
|
||||
// This basically means that our offsets, counters and file numbers allow us
|
||||
// to do somewhat "better than random" (birthday paradox) while in the
|
||||
// degenerate case of completely new session for each tiny file, we still
|
||||
// have strong uniqueness properties from the birthday paradox, with ~103
|
||||
// bit session IDs or up to 128 bits entropy with different DB IDs sharing a
|
||||
// cache.
|
||||
//
|
||||
// More collision probability analysis:
|
||||
// Suppose a RocksDB host generates (generously) 2 GB/s (10TB data, 17 DWPD)
|
||||
// with average process/session lifetime of (pessimistically) 4 minutes.
|
||||
// In 180 days (generous allowable data lifespan), we generate 31 million GB
|
||||
// of data, or 2^55 bytes, and 2^16 "all new" session IDs.
|
||||
//
|
||||
// First, suppose this is in a single DB (lifetime 180 days):
|
||||
// 128 bits cache key size
|
||||
// - 55 <- ideal size for byte offsets + file numbers
|
||||
// - 2 <- bits for offsets and file numbers not exactly powers of two
|
||||
// - 2 <- bits for file number encoding metadata
|
||||
// + 2 <- bits saved not using byte offsets in BlockBasedTable::GetCacheKey
|
||||
// ----
|
||||
// 71 <- bits remaining for distinguishing session IDs
|
||||
// The probability of a collision in 71 bits of session ID data is less than
|
||||
// 1 in 2**(71 - (2 * 16)), or roughly 1 in a trillion. And this assumes all
|
||||
// data from the last 180 days is in cache for potential collision, and that
|
||||
// cache keys under each session id exhaustively cover the remaining 57 bits
|
||||
// while in reality they'll only cover a small fraction of it.
|
||||
//
|
||||
// Although data could be transferred between hosts, each host has its own
|
||||
// cache and we are already assuming a high rate of "all new" session ids.
|
||||
// So this doesn't really change the collision calculation. Across a fleet
|
||||
// of 1 million, each with <1 in a trillion collision possibility,
|
||||
// fleetwide collision probability is <1 in a million.
|
||||
//
|
||||
// Now suppose we have many DBs per host, say 2**10, with same host-wide write
|
||||
// rate and process/session lifetime. File numbers will be ~10 bits smaller
|
||||
// and we will have 2**10 times as many session IDs because of simultaneous
|
||||
// lifetimes. So now collision chance is less than 1 in 2**(81 - (2 * 26)),
|
||||
// or roughly 1 in a billion.
|
||||
//
|
||||
// Suppose instead we generated random or hashed cache keys for each
|
||||
// (compressed) block. For 1KB compressed block size, that is 2^45 cache keys
|
||||
// in 180 days. Collision probability is more easily estimated at roughly
|
||||
// 1 in 2**(128 - (2 * 45)) or roughly 1 in a trillion (assuming all
|
||||
// data from the last 180 days is in cache, but NOT the other assumption
|
||||
// for the 1 in a trillion estimate above).
|
||||
//
|
||||
//
|
||||
// Collision probability estimation through simulation:
|
||||
// A tool ./cache_bench -stress_cache_key broadly simulates host-wide cache
|
||||
// activity over many months, by making some pessimistic simplifying
|
||||
// assumptions. See class StressCacheKey in cache_bench_tool.cc for details.
|
||||
// Here is some sample output with
|
||||
// `./cache_bench -stress_cache_key -sck_keep_bits=40`:
|
||||
//
|
||||
// Total cache or DBs size: 32TiB Writing 925.926 MiB/s or 76.2939TiB/day
|
||||
// Multiply by 9.22337e+18 to correct for simulation losses (but still
|
||||
// assume whole file cached)
|
||||
//
|
||||
// These come from default settings of 2.5M files per day of 32 MB each, and
|
||||
// `-sck_keep_bits=40` means that to represent a single file, we are only
|
||||
// keeping 40 bits of the 128-bit (base) cache key. With file size of 2**25
|
||||
// contiguous keys (pessimistic), our simulation is about 2\*\*(128-40-25) or
|
||||
// about 9 billion billion times more prone to collision than reality.
|
||||
//
|
||||
// More default assumptions, relatively pessimistic:
|
||||
// * 100 DBs in same process (doesn't matter much)
|
||||
// * Re-open DB in same process (new session ID related to old session ID) on
|
||||
// average every 100 files generated
|
||||
// * Restart process (all new session IDs unrelated to old) 24 times per day
|
||||
//
|
||||
// After enough data, we get a result at the end (-sck_keep_bits=40):
|
||||
//
|
||||
// (keep 40 bits) 17 collisions after 2 x 90 days, est 10.5882 days between
|
||||
// (9.76592e+19 corrected)
|
||||
//
|
||||
// If we believe the (pessimistic) simulation and the mathematical
|
||||
// extrapolation, we would need to run a billion machines all for 97 billion
|
||||
// days to expect a cache key collision. To help verify that our extrapolation
|
||||
// ("corrected") is robust, we can make our simulation more precise with
|
||||
// `-sck_keep_bits=41` and `42`, which takes more running time to get enough
|
||||
// collision data:
|
||||
//
|
||||
// (keep 41 bits) 16 collisions after 4 x 90 days, est 22.5 days between
|
||||
// (1.03763e+20 corrected)
|
||||
// (keep 42 bits) 19 collisions after 10 x 90 days, est 47.3684 days between
|
||||
// (1.09224e+20 corrected)
|
||||
//
|
||||
// The extrapolated prediction is very close. If anything, we might have some
|
||||
// very small losses of structured data (see class StressCacheKey in
|
||||
// cache_bench_tool.cc) leading to more accurate & more attractive prediction
|
||||
// with more bits kept.
|
||||
//
|
||||
// With the `-sck_randomize` option, we can see that typical workloads like
|
||||
// above have lower collision probability than "random" cache keys (note:
|
||||
// offsets still non-randomized) by a modest amount (roughly 20x less collision
|
||||
// prone than random), which should make us reasonably comfortable even in
|
||||
// "degenerate" cases (e.g. repeatedly launch a process to generate 1 file
|
||||
// with SstFileWriter):
|
||||
//
|
||||
// (rand 40 bits) 197 collisions after 1 x 90 days, est 0.456853 days between
|
||||
// (4.21372e+18 corrected)
|
||||
//
|
||||
// We can see that with more frequent process restarts (all new session IDs),
|
||||
// we get closer to the "random" cache key performance:
|
||||
//
|
||||
// (-sck_restarts_per_day=5000): 140 collisions after 1 x 90 days, ...
|
||||
// (5.92931e+18 corrected)
|
||||
//
|
||||
// Other tests have been run to validate other conditions behave as expected,
|
||||
// never behaving "worse than random" unless we start chopping off structured
|
||||
// data.
|
||||
//
|
||||
//
|
||||
// Conclusion: Even in extreme cases, rapidly burning through "all new" IDs
|
||||
// that only arise when a new process is started, the chance of any cache key
|
||||
// collisions in a giant fleet of machines is negligible. Especially when
|
||||
// processes live for hours or days, the chance of a cache key collision is
|
||||
// likely more plausibly due to bad hardware than to bad luck in random
|
||||
// session ID data. Software defects are surely more likely to cause corruption
|
||||
// than both of those.
|
||||
//
|
||||
// TODO: Nevertheless / regardless, an efficient way to detect (and thus
|
||||
// quantify) block cache corruptions, including collisions, should be added.
|
||||
OffsetableCacheKey::OffsetableCacheKey(const std::string &db_id,
|
||||
const std::string &db_session_id,
|
||||
uint64_t file_number,
|
||||
uint64_t max_offset) {
|
||||
#ifndef NDEBUG
|
||||
max_offset_ = max_offset;
|
||||
#endif
|
||||
// Closely related to GetSstInternalUniqueId, but only need 128 bits and
|
||||
// need to include an offset within the file.
|
||||
// See also https://github.com/pdillinger/unique_id for background.
|
||||
uint64_t session_upper = 0; // Assignment to appease clang-analyze
|
||||
uint64_t session_lower = 0; // Assignment to appease clang-analyze
|
||||
{
|
||||
Status s = DecodeSessionId(db_session_id, &session_upper, &session_lower);
|
||||
if (!s.ok()) {
|
||||
// A reasonable fallback in case malformed
|
||||
Hash2x64(db_session_id.data(), db_session_id.size(), &session_upper,
|
||||
&session_lower);
|
||||
}
|
||||
}
|
||||
|
||||
// Hash the session upper (~39 bits entropy) and DB id (120+ bits entropy)
|
||||
// for more global uniqueness entropy.
|
||||
// (It is possible that many DBs descended from one common DB id are copied
|
||||
// around and proliferate, in which case session id is critical, but it is
|
||||
// more common for different DBs to have different DB ids.)
|
||||
uint64_t db_hash = Hash64(db_id.data(), db_id.size(), session_upper);
|
||||
|
||||
// This establishes the db+session id part of the cache key.
|
||||
//
|
||||
// Exactly preserve (in common cases; see modifiers below) session lower to
|
||||
// ensure that session ids generated during the same process lifetime are
|
||||
// guaranteed unique.
|
||||
//
|
||||
// We put this first for CommonPrefixSlice(), so that a small-ish set of
|
||||
// cache key prefixes to cover entries relevant to any DB.
|
||||
session_etc64_ = session_lower;
|
||||
// This provides extra entopy in case of different DB id or process
|
||||
// generating a session id, but is also partly/variably obscured by
|
||||
// file_number and offset (see below).
|
||||
offset_etc64_ = db_hash;
|
||||
|
||||
// Into offset_etc64_ we are (eventually) going to pack & xor in an offset and
|
||||
// a file_number, but we might need the file_number to overflow into
|
||||
// session_etc64_. (There must only be one session_etc64_ value per
|
||||
// file, and preferably shared among many files.)
|
||||
//
|
||||
// Figure out how many bytes of file_number we are going to be able to
|
||||
// pack in with max_offset, though our encoding will only support packing
|
||||
// in up to 3 bytes of file_number. (16M file numbers is enough for a new
|
||||
// file number every second for half a year.)
|
||||
int file_number_bytes_in_offset_etc =
|
||||
(63 - FloorLog2(max_offset | 0x100000000U)) / 8;
|
||||
int file_number_bits_in_offset_etc = file_number_bytes_in_offset_etc * 8;
|
||||
|
||||
// Assert two bits of metadata
|
||||
assert(file_number_bytes_in_offset_etc >= 0 &&
|
||||
file_number_bytes_in_offset_etc <= 3);
|
||||
// Assert we couldn't have used a larger allowed number of bytes (shift
|
||||
// would chop off bytes).
|
||||
assert(file_number_bytes_in_offset_etc == 3 ||
|
||||
(max_offset << (file_number_bits_in_offset_etc + 8) >>
|
||||
(file_number_bits_in_offset_etc + 8)) != max_offset);
|
||||
|
||||
uint64_t mask = (uint64_t{1} << (file_number_bits_in_offset_etc)) - 1;
|
||||
// Pack into high bits of etc so that offset can go in low bits of etc
|
||||
// TODO: could be EndianSwapValue?
|
||||
uint64_t offset_etc_modifier = ReverseBits(file_number & mask);
|
||||
assert(offset_etc_modifier << file_number_bits_in_offset_etc == 0U);
|
||||
|
||||
// Overflow and 3 - byte count (likely both zero) go into session_id part
|
||||
uint64_t session_etc_modifier =
|
||||
(file_number >> file_number_bits_in_offset_etc << 2) |
|
||||
static_cast<uint64_t>(3 - file_number_bytes_in_offset_etc);
|
||||
// Packed into high bits to minimize interference with session id counter.
|
||||
session_etc_modifier = ReverseBits(session_etc_modifier);
|
||||
|
||||
// Assert session_id part is only modified in extreme cases
|
||||
assert(session_etc_modifier == 0 || file_number > /*3 bytes*/ 0xffffffU ||
|
||||
max_offset > /*5 bytes*/ 0xffffffffffU);
|
||||
|
||||
// Xor in the modifiers
|
||||
session_etc64_ ^= session_etc_modifier;
|
||||
offset_etc64_ ^= offset_etc_modifier;
|
||||
|
||||
// Although DBImpl guarantees (in recent versions) that session_lower is not
|
||||
// zero, that's not entirely sufficient to guarantee that session_etc64_ is
|
||||
// not zero (so that the 0 case can be used by CacheKey::CreateUnique*)
|
||||
if (session_etc64_ == 0U) {
|
||||
session_etc64_ = session_upper | 1U;
|
||||
}
|
||||
assert(session_etc64_ != 0);
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
132
cache/cache_key.h
vendored
132
cache/cache_key.h
vendored
@ -1,132 +0,0 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "rocksdb/rocksdb_namespace.h"
|
||||
#include "rocksdb/slice.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class Cache;
|
||||
|
||||
// A standard holder for fixed-size block cache keys (and for related caches).
|
||||
// They are created through one of these, each using its own range of values:
|
||||
// * CacheKey::CreateUniqueForCacheLifetime
|
||||
// * CacheKey::CreateUniqueForProcessLifetime
|
||||
// * Default ctor ("empty" cache key)
|
||||
// * OffsetableCacheKey->WithOffset
|
||||
//
|
||||
// The first two use atomic counters to guarantee uniqueness over the given
|
||||
// lifetime and the last uses a form of universally unique identifier for
|
||||
// uniqueness with very high probabilty (and guaranteed for files generated
|
||||
// during a single process lifetime).
|
||||
//
|
||||
// CacheKeys are currently used by calling AsSlice() to pass as a key to
|
||||
// Cache. For performance, the keys are endianness-dependent (though otherwise
|
||||
// portable). (Persistable cache entries are not intended to cross platforms.)
|
||||
class CacheKey {
|
||||
public:
|
||||
// For convenience, constructs an "empty" cache key that is never returned
|
||||
// by other means.
|
||||
inline CacheKey() : session_etc64_(), offset_etc64_() {}
|
||||
|
||||
inline bool IsEmpty() const {
|
||||
return (session_etc64_ == 0) & (offset_etc64_ == 0);
|
||||
}
|
||||
|
||||
// Use this cache key as a Slice (byte order is endianness-dependent)
|
||||
inline Slice AsSlice() const {
|
||||
static_assert(sizeof(*this) == 16, "Standardized on 16-byte cache key");
|
||||
assert(!IsEmpty());
|
||||
return Slice(reinterpret_cast<const char *>(this), sizeof(*this));
|
||||
}
|
||||
|
||||
// Create a CacheKey that is unique among others associated with this Cache
|
||||
// instance. Depends on Cache::NewId. This is useful for block cache
|
||||
// "reservations".
|
||||
static CacheKey CreateUniqueForCacheLifetime(Cache *cache);
|
||||
|
||||
// Create a CacheKey that is unique among others for the lifetime of this
|
||||
// process. This is useful for saving in a static data member so that
|
||||
// different DB instances can agree on a cache key for shared entities,
|
||||
// such as for CacheEntryStatsCollector.
|
||||
static CacheKey CreateUniqueForProcessLifetime();
|
||||
|
||||
protected:
|
||||
friend class OffsetableCacheKey;
|
||||
CacheKey(uint64_t session_etc64, uint64_t offset_etc64)
|
||||
: session_etc64_(session_etc64), offset_etc64_(offset_etc64) {}
|
||||
uint64_t session_etc64_;
|
||||
uint64_t offset_etc64_;
|
||||
};
|
||||
|
||||
// A file-specific generator of cache keys, sometimes referred to as the
|
||||
// "base" cache key for a file because all the cache keys for various offsets
|
||||
// within the file are computed using simple arithmetic. The basis for the
|
||||
// general approach is dicussed here: https://github.com/pdillinger/unique_id
|
||||
// Heavily related to GetUniqueIdFromTableProperties.
|
||||
//
|
||||
// If the db_id, db_session_id, and file_number come from the file's table
|
||||
// properties, then the keys will be stable across DB::Open/Close, backup/
|
||||
// restore, import/export, etc.
|
||||
//
|
||||
// This class "is a" CacheKey only privately so that it is not misused as
|
||||
// a ready-to-use CacheKey.
|
||||
class OffsetableCacheKey : private CacheKey {
|
||||
public:
|
||||
// For convenience, constructs an "empty" cache key that should not be used.
|
||||
inline OffsetableCacheKey() : CacheKey() {}
|
||||
|
||||
// Constructs an OffsetableCacheKey with the given information about a file.
|
||||
// max_offset is based on file size (see WithOffset) and is required here to
|
||||
// choose an appropriate (sub-)encoding. This constructor never generates an
|
||||
// "empty" base key.
|
||||
OffsetableCacheKey(const std::string &db_id, const std::string &db_session_id,
|
||||
uint64_t file_number, uint64_t max_offset);
|
||||
|
||||
inline bool IsEmpty() const {
|
||||
bool result = session_etc64_ == 0;
|
||||
assert(!(offset_etc64_ > 0 && result));
|
||||
return result;
|
||||
}
|
||||
|
||||
// Construct a CacheKey for an offset within a file, which must be
|
||||
// <= max_offset provided in constructor. An offset is not necessarily a
|
||||
// byte offset if a smaller unique identifier of keyable offsets is used.
|
||||
//
|
||||
// This class was designed to make this hot code extremely fast.
|
||||
inline CacheKey WithOffset(uint64_t offset) const {
|
||||
assert(!IsEmpty());
|
||||
assert(offset <= max_offset_);
|
||||
return CacheKey(session_etc64_, offset_etc64_ ^ offset);
|
||||
}
|
||||
|
||||
// The "common prefix" is a shared prefix for all the returned CacheKeys,
|
||||
// that also happens to usually be the same among many files in the same DB,
|
||||
// so is efficient and highly accurate (not perfectly) for DB-specific cache
|
||||
// dump selection (but not file-specific).
|
||||
static constexpr size_t kCommonPrefixSize = 8;
|
||||
inline Slice CommonPrefixSlice() const {
|
||||
static_assert(sizeof(session_etc64_) == kCommonPrefixSize,
|
||||
"8 byte common prefix expected");
|
||||
assert(!IsEmpty());
|
||||
assert(&this->session_etc64_ == static_cast<const void *>(this));
|
||||
|
||||
return Slice(reinterpret_cast<const char *>(this), kCommonPrefixSize);
|
||||
}
|
||||
|
||||
// For any max_offset <= this value, the same encoding scheme is guaranteed.
|
||||
static constexpr uint64_t kMaxOffsetStandardEncoding = 0xffffffffffU;
|
||||
|
||||
private:
|
||||
#ifndef NDEBUG
|
||||
uint64_t max_offset_ = 0;
|
||||
#endif
|
||||
};
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
183
cache/cache_reservation_manager.cc
vendored
183
cache/cache_reservation_manager.cc
vendored
@ -1,183 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
#include "cache/cache_reservation_manager.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <cstddef>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
|
||||
#include "cache/cache_entry_roles.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/slice.h"
|
||||
#include "rocksdb/status.h"
|
||||
#include "table/block_based/reader_common.h"
|
||||
#include "util/coding.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
template <CacheEntryRole R>
|
||||
CacheReservationManagerImpl<R>::CacheReservationHandle::CacheReservationHandle(
|
||||
std::size_t incremental_memory_used,
|
||||
std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr)
|
||||
: incremental_memory_used_(incremental_memory_used) {
|
||||
assert(cache_res_mgr);
|
||||
cache_res_mgr_ = cache_res_mgr;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
CacheReservationManagerImpl<
|
||||
R>::CacheReservationHandle::~CacheReservationHandle() {
|
||||
Status s = cache_res_mgr_->ReleaseCacheReservation(incremental_memory_used_);
|
||||
s.PermitUncheckedError();
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
CacheReservationManagerImpl<R>::CacheReservationManagerImpl(
|
||||
std::shared_ptr<Cache> cache, bool delayed_decrease)
|
||||
: delayed_decrease_(delayed_decrease),
|
||||
cache_allocated_size_(0),
|
||||
memory_used_(0) {
|
||||
assert(cache != nullptr);
|
||||
cache_ = cache;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
CacheReservationManagerImpl<R>::~CacheReservationManagerImpl() {
|
||||
for (auto* handle : dummy_handles_) {
|
||||
cache_->Release(handle, true);
|
||||
}
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Status CacheReservationManagerImpl<R>::UpdateCacheReservation(
|
||||
std::size_t new_mem_used) {
|
||||
memory_used_ = new_mem_used;
|
||||
std::size_t cur_cache_allocated_size =
|
||||
cache_allocated_size_.load(std::memory_order_relaxed);
|
||||
if (new_mem_used == cur_cache_allocated_size) {
|
||||
return Status::OK();
|
||||
} else if (new_mem_used > cur_cache_allocated_size) {
|
||||
Status s = IncreaseCacheReservation(new_mem_used);
|
||||
return s;
|
||||
} else {
|
||||
// In delayed decrease mode, we don't decrease cache reservation
|
||||
// untill the memory usage is less than 3/4 of what we reserve
|
||||
// in the cache.
|
||||
// We do this because
|
||||
// (1) Dummy entry insertion is expensive in block cache
|
||||
// (2) Delayed releasing previously inserted dummy entries can save such
|
||||
// expensive dummy entry insertion on memory increase in the near future,
|
||||
// which is likely to happen when the memory usage is greater than or equal
|
||||
// to 3/4 of what we reserve
|
||||
if (delayed_decrease_ && new_mem_used >= cur_cache_allocated_size / 4 * 3) {
|
||||
return Status::OK();
|
||||
} else {
|
||||
Status s = DecreaseCacheReservation(new_mem_used);
|
||||
return s;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Status CacheReservationManagerImpl<R>::MakeCacheReservation(
|
||||
std::size_t incremental_memory_used,
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle>* handle) {
|
||||
assert(handle);
|
||||
Status s =
|
||||
UpdateCacheReservation(GetTotalMemoryUsed() + incremental_memory_used);
|
||||
(*handle).reset(new CacheReservationManagerImpl::CacheReservationHandle(
|
||||
incremental_memory_used,
|
||||
std::enable_shared_from_this<
|
||||
CacheReservationManagerImpl<R>>::shared_from_this()));
|
||||
return s;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Status CacheReservationManagerImpl<R>::ReleaseCacheReservation(
|
||||
std::size_t incremental_memory_used) {
|
||||
assert(GetTotalMemoryUsed() >= incremental_memory_used);
|
||||
std::size_t updated_total_mem_used =
|
||||
GetTotalMemoryUsed() - incremental_memory_used;
|
||||
Status s = UpdateCacheReservation(updated_total_mem_used);
|
||||
return s;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Status CacheReservationManagerImpl<R>::IncreaseCacheReservation(
|
||||
std::size_t new_mem_used) {
|
||||
Status return_status = Status::OK();
|
||||
while (new_mem_used > cache_allocated_size_.load(std::memory_order_relaxed)) {
|
||||
Cache::Handle* handle = nullptr;
|
||||
return_status = cache_->Insert(GetNextCacheKey(), nullptr, kSizeDummyEntry,
|
||||
GetNoopDeleterForRole<R>(), &handle);
|
||||
|
||||
if (return_status != Status::OK()) {
|
||||
return return_status;
|
||||
}
|
||||
|
||||
dummy_handles_.push_back(handle);
|
||||
cache_allocated_size_ += kSizeDummyEntry;
|
||||
}
|
||||
return return_status;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Status CacheReservationManagerImpl<R>::DecreaseCacheReservation(
|
||||
std::size_t new_mem_used) {
|
||||
Status return_status = Status::OK();
|
||||
|
||||
// Decrease to the smallest multiple of kSizeDummyEntry that is greater than
|
||||
// or equal to new_mem_used We do addition instead of new_mem_used <=
|
||||
// cache_allocated_size_.load(std::memory_order_relaxed) - kSizeDummyEntry to
|
||||
// avoid underflow of size_t when cache_allocated_size_ = 0
|
||||
while (new_mem_used + kSizeDummyEntry <=
|
||||
cache_allocated_size_.load(std::memory_order_relaxed)) {
|
||||
assert(!dummy_handles_.empty());
|
||||
auto* handle = dummy_handles_.back();
|
||||
cache_->Release(handle, true);
|
||||
dummy_handles_.pop_back();
|
||||
cache_allocated_size_ -= kSizeDummyEntry;
|
||||
}
|
||||
return return_status;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
std::size_t CacheReservationManagerImpl<R>::GetTotalReservedCacheSize() {
|
||||
return cache_allocated_size_.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
std::size_t CacheReservationManagerImpl<R>::GetTotalMemoryUsed() {
|
||||
return memory_used_;
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Slice CacheReservationManagerImpl<R>::GetNextCacheKey() {
|
||||
// Calling this function will have the side-effect of changing the
|
||||
// underlying cache_key_ that is shared among other keys generated from this
|
||||
// fucntion. Therefore please make sure the previous keys are saved/copied
|
||||
// before calling this function.
|
||||
cache_key_ = CacheKey::CreateUniqueForCacheLifetime(cache_.get());
|
||||
return cache_key_.AsSlice();
|
||||
}
|
||||
|
||||
template <CacheEntryRole R>
|
||||
Cache::DeleterFn CacheReservationManagerImpl<R>::TEST_GetNoopDeleterForRole() {
|
||||
return GetNoopDeleterForRole<R>();
|
||||
}
|
||||
|
||||
template class CacheReservationManagerImpl<
|
||||
CacheEntryRole::kBlockBasedTableReader>;
|
||||
template class CacheReservationManagerImpl<
|
||||
CacheEntryRole::kCompressionDictionaryBuildingBuffer>;
|
||||
template class CacheReservationManagerImpl<CacheEntryRole::kFilterConstruction>;
|
||||
template class CacheReservationManagerImpl<CacheEntryRole::kMisc>;
|
||||
template class CacheReservationManagerImpl<CacheEntryRole::kWriteBuffer>;
|
||||
} // namespace ROCKSDB_NAMESPACE
|
288
cache/cache_reservation_manager.h
vendored
288
cache/cache_reservation_manager.h
vendored
@ -1,288 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "cache/cache_entry_roles.h"
|
||||
#include "cache/cache_key.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/slice.h"
|
||||
#include "rocksdb/status.h"
|
||||
#include "util/coding.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
// CacheReservationManager is an interface for reserving cache space for the
|
||||
// memory used
|
||||
class CacheReservationManager {
|
||||
public:
|
||||
// CacheReservationHandle is for managing the lifetime of a cache reservation
|
||||
// for an incremental amount of memory used (i.e, incremental_memory_used)
|
||||
class CacheReservationHandle {
|
||||
public:
|
||||
virtual ~CacheReservationHandle() {}
|
||||
};
|
||||
virtual ~CacheReservationManager() {}
|
||||
virtual Status UpdateCacheReservation(std::size_t new_memory_used) = 0;
|
||||
virtual Status MakeCacheReservation(
|
||||
std::size_t incremental_memory_used,
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle>
|
||||
*handle) = 0;
|
||||
virtual std::size_t GetTotalReservedCacheSize() = 0;
|
||||
virtual std::size_t GetTotalMemoryUsed() = 0;
|
||||
};
|
||||
|
||||
// CacheReservationManagerImpl implements interface CacheReservationManager
|
||||
// for reserving cache space for the memory used by inserting/releasing dummy
|
||||
// entries in the cache.
|
||||
//
|
||||
// This class is NOT thread-safe, except that GetTotalReservedCacheSize()
|
||||
// can be called without external synchronization.
|
||||
template <CacheEntryRole R>
|
||||
class CacheReservationManagerImpl
|
||||
: public CacheReservationManager,
|
||||
public std::enable_shared_from_this<CacheReservationManagerImpl<R>> {
|
||||
public:
|
||||
class CacheReservationHandle
|
||||
: public CacheReservationManager::CacheReservationHandle {
|
||||
public:
|
||||
CacheReservationHandle(
|
||||
std::size_t incremental_memory_used,
|
||||
std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr);
|
||||
~CacheReservationHandle() override;
|
||||
|
||||
private:
|
||||
std::size_t incremental_memory_used_;
|
||||
std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr_;
|
||||
};
|
||||
|
||||
// Construct a CacheReservationManagerImpl
|
||||
// @param cache The cache where dummy entries are inserted and released for
|
||||
// reserving cache space
|
||||
// @param delayed_decrease If set true, then dummy entries won't be released
|
||||
// immediately when memory usage decreases.
|
||||
// Instead, it will be released when the memory usage
|
||||
// decreases to 3/4 of what we have reserved so far.
|
||||
// This is for saving some future dummy entry
|
||||
// insertion when memory usage increases are likely to
|
||||
// happen in the near future.
|
||||
//
|
||||
// REQUIRED: cache is not nullptr
|
||||
explicit CacheReservationManagerImpl(std::shared_ptr<Cache> cache,
|
||||
bool delayed_decrease = false);
|
||||
|
||||
// no copy constructor, copy assignment, move constructor, move assignment
|
||||
CacheReservationManagerImpl(const CacheReservationManagerImpl &) = delete;
|
||||
CacheReservationManagerImpl &operator=(const CacheReservationManagerImpl &) =
|
||||
delete;
|
||||
CacheReservationManagerImpl(CacheReservationManagerImpl &&) = delete;
|
||||
CacheReservationManagerImpl &operator=(CacheReservationManagerImpl &&) =
|
||||
delete;
|
||||
|
||||
~CacheReservationManagerImpl() override;
|
||||
|
||||
// One of the two ways of reserving/releasing cache space,
|
||||
// see MakeCacheReservation() for the other.
|
||||
//
|
||||
// Use ONLY one of these two ways to prevent unexpected behavior.
|
||||
//
|
||||
// Insert and release dummy entries in the cache to
|
||||
// match the size of total dummy entries with the least multiple of
|
||||
// kSizeDummyEntry greater than or equal to new_mem_used
|
||||
//
|
||||
// Insert dummy entries if new_memory_used > cache_allocated_size_;
|
||||
//
|
||||
// Release dummy entries if new_memory_used < cache_allocated_size_
|
||||
// (and new_memory_used < cache_allocated_size_ * 3/4
|
||||
// when delayed_decrease is set true);
|
||||
//
|
||||
// Keey dummy entries the same if (1) new_memory_used == cache_allocated_size_
|
||||
// or (2) new_memory_used is in the interval of
|
||||
// [cache_allocated_size_ * 3/4, cache_allocated_size) when delayed_decrease
|
||||
// is set true.
|
||||
//
|
||||
// @param new_memory_used The number of bytes used by new memory
|
||||
// The most recent new_memoy_used passed in will be returned
|
||||
// in GetTotalMemoryUsed() even when the call return non-ok status.
|
||||
//
|
||||
// Since the class is NOT thread-safe, external synchronization on the
|
||||
// order of calling UpdateCacheReservation() is needed if you want
|
||||
// GetTotalMemoryUsed() indeed returns the latest memory used.
|
||||
//
|
||||
// @return On inserting dummy entries, it returns Status::OK() if all dummy
|
||||
// entry insertions succeed.
|
||||
// Otherwise, it returns the first non-ok status;
|
||||
// On releasing dummy entries, it always returns Status::OK().
|
||||
// On keeping dummy entries the same, it always returns Status::OK().
|
||||
Status UpdateCacheReservation(std::size_t new_memory_used) override;
|
||||
|
||||
// One of the two ways of reserving cache space and releasing is done through
|
||||
// destruction of CacheReservationHandle.
|
||||
// See UpdateCacheReservation() for the other way.
|
||||
//
|
||||
// Use ONLY one of these two ways to prevent unexpected behavior.
|
||||
//
|
||||
// Insert dummy entries in the cache for the incremental memory usage
|
||||
// to match the size of total dummy entries with the least multiple of
|
||||
// kSizeDummyEntry greater than or equal to the total memory used.
|
||||
//
|
||||
// A CacheReservationHandle is returned as an output parameter.
|
||||
// The reserved dummy entries are automatically released on the destruction of
|
||||
// this handle, which achieves better RAII per cache reservation.
|
||||
//
|
||||
// WARNING: Deallocate all the handles of the CacheReservationManager object
|
||||
// before deallocating the object to prevent unexpected behavior.
|
||||
//
|
||||
// @param incremental_memory_used The number of bytes increased in memory
|
||||
// usage.
|
||||
//
|
||||
// Calling GetTotalMemoryUsed() afterward will return the total memory
|
||||
// increased by this number, even when calling MakeCacheReservation()
|
||||
// returns non-ok status.
|
||||
//
|
||||
// Since the class is NOT thread-safe, external synchronization in
|
||||
// calling MakeCacheReservation() is needed if you want
|
||||
// GetTotalMemoryUsed() indeed returns the latest memory used.
|
||||
//
|
||||
// @param handle An pointer to std::unique_ptr<CacheReservationHandle> that
|
||||
// manages the lifetime of the cache reservation represented by the
|
||||
// handle.
|
||||
//
|
||||
// @return It returns Status::OK() if all dummy
|
||||
// entry insertions succeed.
|
||||
// Otherwise, it returns the first non-ok status;
|
||||
//
|
||||
// REQUIRES: handle != nullptr
|
||||
Status MakeCacheReservation(
|
||||
std::size_t incremental_memory_used,
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle> *handle)
|
||||
override;
|
||||
|
||||
// Return the size of the cache (which is a multiple of kSizeDummyEntry)
|
||||
// successfully reserved by calling UpdateCacheReservation().
|
||||
//
|
||||
// When UpdateCacheReservation() returns non-ok status,
|
||||
// calling GetTotalReservedCacheSize() after that might return a slightly
|
||||
// smaller number than the actual reserved cache size due to
|
||||
// the returned number will always be a multiple of kSizeDummyEntry
|
||||
// and cache full might happen in the middle of inserting a dummy entry.
|
||||
std::size_t GetTotalReservedCacheSize() override;
|
||||
|
||||
// Return the latest total memory used indicated by the most recent call of
|
||||
// UpdateCacheReservation(std::size_t new_memory_used);
|
||||
std::size_t GetTotalMemoryUsed() override;
|
||||
|
||||
static constexpr std::size_t GetDummyEntrySize() { return kSizeDummyEntry; }
|
||||
|
||||
// For testing only - it is to help ensure the NoopDeleterForRole<R>
|
||||
// accessed from CacheReservationManagerImpl and the one accessed from the
|
||||
// test are from the same translation units
|
||||
static Cache::DeleterFn TEST_GetNoopDeleterForRole();
|
||||
|
||||
private:
|
||||
static constexpr std::size_t kSizeDummyEntry = 256 * 1024;
|
||||
|
||||
Slice GetNextCacheKey();
|
||||
|
||||
Status ReleaseCacheReservation(std::size_t incremental_memory_used);
|
||||
Status IncreaseCacheReservation(std::size_t new_mem_used);
|
||||
Status DecreaseCacheReservation(std::size_t new_mem_used);
|
||||
|
||||
std::shared_ptr<Cache> cache_;
|
||||
bool delayed_decrease_;
|
||||
std::atomic<std::size_t> cache_allocated_size_;
|
||||
std::size_t memory_used_;
|
||||
std::vector<Cache::Handle *> dummy_handles_;
|
||||
CacheKey cache_key_;
|
||||
};
|
||||
|
||||
class ConcurrentCacheReservationManager
|
||||
: public CacheReservationManager,
|
||||
public std::enable_shared_from_this<ConcurrentCacheReservationManager> {
|
||||
public:
|
||||
class CacheReservationHandle
|
||||
: public CacheReservationManager::CacheReservationHandle {
|
||||
public:
|
||||
CacheReservationHandle(
|
||||
std::shared_ptr<ConcurrentCacheReservationManager> cache_res_mgr,
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle>
|
||||
cache_res_handle) {
|
||||
assert(cache_res_mgr && cache_res_handle);
|
||||
cache_res_mgr_ = cache_res_mgr;
|
||||
cache_res_handle_ = std::move(cache_res_handle);
|
||||
}
|
||||
|
||||
~CacheReservationHandle() override {
|
||||
std::lock_guard<std::mutex> lock(cache_res_mgr_->cache_res_mgr_mu_);
|
||||
cache_res_handle_.reset();
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<ConcurrentCacheReservationManager> cache_res_mgr_;
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle>
|
||||
cache_res_handle_;
|
||||
};
|
||||
|
||||
explicit ConcurrentCacheReservationManager(
|
||||
std::shared_ptr<CacheReservationManager> cache_res_mgr) {
|
||||
cache_res_mgr_ = std::move(cache_res_mgr);
|
||||
}
|
||||
ConcurrentCacheReservationManager(const ConcurrentCacheReservationManager &) =
|
||||
delete;
|
||||
ConcurrentCacheReservationManager &operator=(
|
||||
const ConcurrentCacheReservationManager &) = delete;
|
||||
ConcurrentCacheReservationManager(ConcurrentCacheReservationManager &&) =
|
||||
delete;
|
||||
ConcurrentCacheReservationManager &operator=(
|
||||
ConcurrentCacheReservationManager &&) = delete;
|
||||
|
||||
~ConcurrentCacheReservationManager() override {}
|
||||
|
||||
inline Status UpdateCacheReservation(std::size_t new_memory_used) override {
|
||||
std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
|
||||
return cache_res_mgr_->UpdateCacheReservation(new_memory_used);
|
||||
}
|
||||
inline Status MakeCacheReservation(
|
||||
std::size_t incremental_memory_used,
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle> *handle)
|
||||
override {
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle>
|
||||
wrapped_handle;
|
||||
Status s;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
|
||||
s = cache_res_mgr_->MakeCacheReservation(incremental_memory_used,
|
||||
&wrapped_handle);
|
||||
}
|
||||
(*handle).reset(
|
||||
new ConcurrentCacheReservationManager::CacheReservationHandle(
|
||||
std::enable_shared_from_this<
|
||||
ConcurrentCacheReservationManager>::shared_from_this(),
|
||||
std::move(wrapped_handle)));
|
||||
return s;
|
||||
}
|
||||
inline std::size_t GetTotalReservedCacheSize() override {
|
||||
return cache_res_mgr_->GetTotalReservedCacheSize();
|
||||
}
|
||||
inline std::size_t GetTotalMemoryUsed() override {
|
||||
std::lock_guard<std::mutex> lock(cache_res_mgr_mu_);
|
||||
return cache_res_mgr_->GetTotalMemoryUsed();
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex cache_res_mgr_mu_;
|
||||
std::shared_ptr<CacheReservationManager> cache_res_mgr_;
|
||||
};
|
||||
} // namespace ROCKSDB_NAMESPACE
|
468
cache/cache_reservation_manager_test.cc
vendored
468
cache/cache_reservation_manager_test.cc
vendored
@ -1,468 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
#include "cache/cache_reservation_manager.h"
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
|
||||
#include "cache/cache_entry_roles.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "rocksdb/slice.h"
|
||||
#include "test_util/testharness.h"
|
||||
#include "util/coding.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
class CacheReservationManagerTest : public ::testing::Test {
|
||||
protected:
|
||||
static constexpr std::size_t kSizeDummyEntry =
|
||||
CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
|
||||
static constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
|
||||
static constexpr int kNumShardBits = 0; // 2^0 shard
|
||||
static constexpr std::size_t kMetaDataChargeOverhead = 10000;
|
||||
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(kCacheCapacity, kNumShardBits);
|
||||
std::shared_ptr<CacheReservationManager> test_cache_rev_mng;
|
||||
|
||||
CacheReservationManagerTest() {
|
||||
test_cache_rev_mng =
|
||||
std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
|
||||
cache);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(CacheReservationManagerTest, GenerateCacheKey) {
|
||||
std::size_t new_mem_used = 1 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry);
|
||||
ASSERT_LT(cache->GetPinnedUsage(),
|
||||
1 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
|
||||
// Next unique Cache key
|
||||
CacheKey ckey = CacheKey::CreateUniqueForCacheLifetime(cache.get());
|
||||
// Get to the underlying values
|
||||
using PairU64 = std::array<uint64_t, 2>;
|
||||
auto& ckey_pair = *reinterpret_cast<PairU64*>(&ckey);
|
||||
// Back it up to the one used by CRM (using CacheKey implementation details)
|
||||
ckey_pair[1]--;
|
||||
|
||||
// Specific key (subject to implementation details)
|
||||
EXPECT_EQ(ckey_pair, PairU64({0, 2}));
|
||||
|
||||
Cache::Handle* handle = cache->Lookup(ckey.AsSlice());
|
||||
EXPECT_NE(handle, nullptr)
|
||||
<< "Failed to generate the cache key for the dummy entry correctly";
|
||||
// Clean up the returned handle from Lookup() to prevent memory leak
|
||||
cache->Release(handle);
|
||||
}
|
||||
|
||||
TEST_F(CacheReservationManagerTest, KeepCacheReservationTheSame) {
|
||||
std::size_t new_mem_used = 1 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry);
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
|
||||
std::size_t initial_pinned_usage = cache->GetPinnedUsage();
|
||||
ASSERT_GE(initial_pinned_usage, 1 * kSizeDummyEntry);
|
||||
ASSERT_LT(initial_pinned_usage,
|
||||
1 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to keep cache reservation the same when new_mem_used equals "
|
||||
"to current cache reservation";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly when new_mem_used equals to current "
|
||||
"cache reservation";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly when new_mem_used "
|
||||
"equals to current cache reservation";
|
||||
EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
|
||||
<< "Failed to keep underlying dummy entries the same when new_mem_used "
|
||||
"equals to current cache reservation";
|
||||
}
|
||||
|
||||
TEST_F(CacheReservationManagerTest,
|
||||
IncreaseCacheReservationByMultiplesOfDummyEntrySize) {
|
||||
std::size_t new_mem_used = 2 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to increase cache reservation correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
2 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation increase correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry)
|
||||
<< "Failed to increase underlying dummy entries in cache correctly";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
2 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to increase underlying dummy entries in cache correctly";
|
||||
}
|
||||
|
||||
TEST_F(CacheReservationManagerTest,
|
||||
IncreaseCacheReservationNotByMultiplesOfDummyEntrySize) {
|
||||
std::size_t new_mem_used = 2 * kSizeDummyEntry + kSizeDummyEntry / 2;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to increase cache reservation correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
3 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation increase correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 3 * kSizeDummyEntry)
|
||||
<< "Failed to increase underlying dummy entries in cache correctly";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
3 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to increase underlying dummy entries in cache correctly";
|
||||
}
|
||||
|
||||
TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
|
||||
IncreaseCacheReservationOnFullCache) {
|
||||
;
|
||||
constexpr std::size_t kSizeDummyEntry =
|
||||
CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
|
||||
constexpr std::size_t kSmallCacheCapacity = 4 * kSizeDummyEntry;
|
||||
constexpr std::size_t kBigCacheCapacity = 4096 * kSizeDummyEntry;
|
||||
constexpr std::size_t kMetaDataChargeOverhead = 10000;
|
||||
|
||||
LRUCacheOptions lo;
|
||||
lo.capacity = kSmallCacheCapacity;
|
||||
lo.num_shard_bits = 0; // 2^0 shard
|
||||
lo.strict_capacity_limit = true;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(lo);
|
||||
std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
|
||||
std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
|
||||
cache);
|
||||
|
||||
std::size_t new_mem_used = kSmallCacheCapacity + 1;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::Incomplete())
|
||||
<< "Failed to return status to indicate failure of dummy entry insertion "
|
||||
"during cache reservation on full cache";
|
||||
EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly before cache resevation failure happens "
|
||||
"due to full cache";
|
||||
EXPECT_LE(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
kSmallCacheCapacity)
|
||||
<< "Failed to bookkeep correctly (i.e, bookkeep only successful dummy "
|
||||
"entry insertions) when encountering cache resevation failure due to "
|
||||
"full cache";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
|
||||
<< "Failed to insert underlying dummy entries correctly when "
|
||||
"encountering cache resevation failure due to full cache";
|
||||
EXPECT_LE(cache->GetPinnedUsage(), kSmallCacheCapacity)
|
||||
<< "Failed to insert underlying dummy entries correctly when "
|
||||
"encountering cache resevation failure due to full cache";
|
||||
|
||||
new_mem_used = kSmallCacheCapacity / 2; // 2 dummy entries
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to decrease cache reservation after encountering cache "
|
||||
"reservation failure due to full cache";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
2 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation decrease correctly after "
|
||||
"encountering cache reservation due to full cache";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry)
|
||||
<< "Failed to release underlying dummy entries correctly on cache "
|
||||
"reservation decrease after encountering cache resevation failure due "
|
||||
"to full cache";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
2 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to release underlying dummy entries correctly on cache "
|
||||
"reservation decrease after encountering cache resevation failure due "
|
||||
"to full cache";
|
||||
|
||||
// Create cache full again for subsequent tests
|
||||
new_mem_used = kSmallCacheCapacity + 1;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::Incomplete())
|
||||
<< "Failed to return status to indicate failure of dummy entry insertion "
|
||||
"during cache reservation on full cache";
|
||||
EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly before cache resevation failure happens "
|
||||
"due to full cache";
|
||||
EXPECT_LE(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
kSmallCacheCapacity)
|
||||
<< "Failed to bookkeep correctly (i.e, bookkeep only successful dummy "
|
||||
"entry insertions) when encountering cache resevation failure due to "
|
||||
"full cache";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
|
||||
<< "Failed to insert underlying dummy entries correctly when "
|
||||
"encountering cache resevation failure due to full cache";
|
||||
EXPECT_LE(cache->GetPinnedUsage(), kSmallCacheCapacity)
|
||||
<< "Failed to insert underlying dummy entries correctly when "
|
||||
"encountering cache resevation failure due to full cache";
|
||||
|
||||
// Increase cache capacity so the previously failed insertion can fully
|
||||
// succeed
|
||||
cache->SetCapacity(kBigCacheCapacity);
|
||||
new_mem_used = kSmallCacheCapacity + 1;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to increase cache reservation after increasing cache capacity "
|
||||
"and mitigating cache full error";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
5 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation increase correctly after "
|
||||
"increasing cache capacity and mitigating cache full error";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 5 * kSizeDummyEntry)
|
||||
<< "Failed to insert underlying dummy entries correctly after increasing "
|
||||
"cache capacity and mitigating cache full error";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
5 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to insert underlying dummy entries correctly after increasing "
|
||||
"cache capacity and mitigating cache full error";
|
||||
}
|
||||
|
||||
TEST_F(CacheReservationManagerTest,
|
||||
DecreaseCacheReservationByMultiplesOfDummyEntrySize) {
|
||||
std::size_t new_mem_used = 2 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
2 * kSizeDummyEntry);
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
|
||||
ASSERT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry);
|
||||
ASSERT_LT(cache->GetPinnedUsage(),
|
||||
2 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
|
||||
new_mem_used = 1 * kSizeDummyEntry;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to decrease cache reservation correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation decrease correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
|
||||
<< "Failed to decrease underlying dummy entries in cache correctly";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
1 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to decrease underlying dummy entries in cache correctly";
|
||||
}
|
||||
|
||||
TEST_F(CacheReservationManagerTest,
|
||||
DecreaseCacheReservationNotByMultiplesOfDummyEntrySize) {
|
||||
std::size_t new_mem_used = 2 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
2 * kSizeDummyEntry);
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
|
||||
ASSERT_GE(cache->GetPinnedUsage(), 2 * kSizeDummyEntry);
|
||||
ASSERT_LT(cache->GetPinnedUsage(),
|
||||
2 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
|
||||
new_mem_used = kSizeDummyEntry / 2;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to decrease cache reservation correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
1 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep cache reservation decrease correctly";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry)
|
||||
<< "Failed to decrease underlying dummy entries in cache correctly";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
1 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to decrease underlying dummy entries in cache correctly";
|
||||
}
|
||||
|
||||
TEST(CacheReservationManagerWithDelayedDecreaseTest,
|
||||
DecreaseCacheReservationWithDelayedDecrease) {
|
||||
constexpr std::size_t kSizeDummyEntry =
|
||||
CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
|
||||
constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
|
||||
constexpr std::size_t kMetaDataChargeOverhead = 10000;
|
||||
|
||||
LRUCacheOptions lo;
|
||||
lo.capacity = kCacheCapacity;
|
||||
lo.num_shard_bits = 0;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(lo);
|
||||
std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
|
||||
std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
|
||||
cache, true /* delayed_decrease */);
|
||||
|
||||
std::size_t new_mem_used = 8 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
8 * kSizeDummyEntry);
|
||||
ASSERT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used);
|
||||
std::size_t initial_pinned_usage = cache->GetPinnedUsage();
|
||||
ASSERT_GE(initial_pinned_usage, 8 * kSizeDummyEntry);
|
||||
ASSERT_LT(initial_pinned_usage,
|
||||
8 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
|
||||
new_mem_used = 6 * kSizeDummyEntry;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK()) << "Failed to delay decreasing cache reservation";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
8 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly when delaying cache reservation "
|
||||
"decrease";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
|
||||
<< "Failed to delay decreasing underlying dummy entries in cache";
|
||||
|
||||
new_mem_used = 7 * kSizeDummyEntry;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK()) << "Failed to delay decreasing cache reservation";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
8 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly when delaying cache reservation "
|
||||
"decrease";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_EQ(cache->GetPinnedUsage(), initial_pinned_usage)
|
||||
<< "Failed to delay decreasing underlying dummy entries in cache";
|
||||
|
||||
new_mem_used = 6 * kSizeDummyEntry - 1;
|
||||
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
EXPECT_EQ(s, Status::OK())
|
||||
<< "Failed to decrease cache reservation correctly when new_mem_used < "
|
||||
"GetTotalReservedCacheSize() * 3 / 4 on delayed decrease mode";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(),
|
||||
6 * kSizeDummyEntry)
|
||||
<< "Failed to bookkeep correctly when new_mem_used < "
|
||||
"GetTotalReservedCacheSize() * 3 / 4 on delayed decrease mode";
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), new_mem_used)
|
||||
<< "Failed to bookkeep the used memory correctly";
|
||||
EXPECT_GE(cache->GetPinnedUsage(), 6 * kSizeDummyEntry)
|
||||
<< "Failed to decrease underlying dummy entries in cache when "
|
||||
"new_mem_used < GetTotalReservedCacheSize() * 3 / 4 on delayed "
|
||||
"decrease mode";
|
||||
EXPECT_LT(cache->GetPinnedUsage(),
|
||||
6 * kSizeDummyEntry + kMetaDataChargeOverhead)
|
||||
<< "Failed to decrease underlying dummy entries in cache when "
|
||||
"new_mem_used < GetTotalReservedCacheSize() * 3 / 4 on delayed "
|
||||
"decrease mode";
|
||||
}
|
||||
|
||||
TEST(CacheReservationManagerDestructorTest,
|
||||
ReleaseRemainingDummyEntriesOnDestruction) {
|
||||
constexpr std::size_t kSizeDummyEntry =
|
||||
CacheReservationManagerImpl<CacheEntryRole::kMisc>::GetDummyEntrySize();
|
||||
constexpr std::size_t kCacheCapacity = 4096 * kSizeDummyEntry;
|
||||
constexpr std::size_t kMetaDataChargeOverhead = 10000;
|
||||
|
||||
LRUCacheOptions lo;
|
||||
lo.capacity = kCacheCapacity;
|
||||
lo.num_shard_bits = 0;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(lo);
|
||||
{
|
||||
std::shared_ptr<CacheReservationManager> test_cache_rev_mng =
|
||||
std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
|
||||
cache);
|
||||
std::size_t new_mem_used = 1 * kSizeDummyEntry;
|
||||
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
ASSERT_GE(cache->GetPinnedUsage(), 1 * kSizeDummyEntry);
|
||||
ASSERT_LT(cache->GetPinnedUsage(),
|
||||
1 * kSizeDummyEntry + kMetaDataChargeOverhead);
|
||||
}
|
||||
EXPECT_EQ(cache->GetPinnedUsage(), 0 * kSizeDummyEntry)
|
||||
<< "Failed to release remaining underlying dummy entries in cache in "
|
||||
"CacheReservationManager's destructor";
|
||||
}
|
||||
|
||||
TEST(CacheReservationHandleTest, HandleTest) {
|
||||
constexpr std::size_t kOneGigabyte = 1024 * 1024 * 1024;
|
||||
constexpr std::size_t kSizeDummyEntry = 256 * 1024;
|
||||
constexpr std::size_t kMetaDataChargeOverhead = 10000;
|
||||
|
||||
LRUCacheOptions lo;
|
||||
lo.capacity = kOneGigabyte;
|
||||
lo.num_shard_bits = 0;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(lo);
|
||||
|
||||
std::shared_ptr<CacheReservationManager> test_cache_rev_mng(
|
||||
std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
|
||||
cache));
|
||||
|
||||
std::size_t mem_used = 0;
|
||||
const std::size_t incremental_mem_used_handle_1 = 1 * kSizeDummyEntry;
|
||||
const std::size_t incremental_mem_used_handle_2 = 2 * kSizeDummyEntry;
|
||||
std::unique_ptr<CacheReservationManager::CacheReservationHandle> handle_1,
|
||||
handle_2;
|
||||
|
||||
// To test consecutive CacheReservationManager::MakeCacheReservation works
|
||||
// correctly in terms of returning the handle as well as updating cache
|
||||
// reservation and the latest total memory used
|
||||
Status s = test_cache_rev_mng->MakeCacheReservation(
|
||||
incremental_mem_used_handle_1, &handle_1);
|
||||
mem_used = mem_used + incremental_mem_used_handle_1;
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
EXPECT_TRUE(handle_1 != nullptr);
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
|
||||
EXPECT_GE(cache->GetPinnedUsage(), mem_used);
|
||||
EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);
|
||||
|
||||
s = test_cache_rev_mng->MakeCacheReservation(incremental_mem_used_handle_2,
|
||||
&handle_2);
|
||||
mem_used = mem_used + incremental_mem_used_handle_2;
|
||||
ASSERT_EQ(s, Status::OK());
|
||||
EXPECT_TRUE(handle_2 != nullptr);
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
|
||||
EXPECT_GE(cache->GetPinnedUsage(), mem_used);
|
||||
EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);
|
||||
|
||||
// To test
|
||||
// CacheReservationManager::CacheReservationHandle::~CacheReservationHandle()
|
||||
// works correctly in releasing the cache reserved for the handle
|
||||
handle_1.reset();
|
||||
EXPECT_TRUE(handle_1 == nullptr);
|
||||
mem_used = mem_used - incremental_mem_used_handle_1;
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalReservedCacheSize(), mem_used);
|
||||
EXPECT_EQ(test_cache_rev_mng->GetTotalMemoryUsed(), mem_used);
|
||||
EXPECT_GE(cache->GetPinnedUsage(), mem_used);
|
||||
EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);
|
||||
|
||||
// To test the actual CacheReservationManager object won't be deallocated
|
||||
// as long as there remain handles pointing to it.
|
||||
// We strongly recommend deallocating CacheReservationManager object only
|
||||
// after all its handles are deallocated to keep things easy to reasonate
|
||||
test_cache_rev_mng.reset();
|
||||
EXPECT_GE(cache->GetPinnedUsage(), mem_used);
|
||||
EXPECT_LT(cache->GetPinnedUsage(), mem_used + kMetaDataChargeOverhead);
|
||||
|
||||
handle_2.reset();
|
||||
// The CacheReservationManager object is now deallocated since all the handles
|
||||
// and its original pointer is gone
|
||||
mem_used = mem_used - incremental_mem_used_handle_2;
|
||||
EXPECT_EQ(mem_used, 0);
|
||||
EXPECT_EQ(cache->GetPinnedUsage(), mem_used);
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
251
cache/cache_test.cc
vendored
251
cache/cache_test.cc
vendored
@ -14,15 +14,13 @@
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "cache/clock_cache.h"
|
||||
#include "cache/fast_lru_cache.h"
|
||||
#include "cache/lru_cache.h"
|
||||
#include "test_util/testharness.h"
|
||||
#include "util/coding.h"
|
||||
#include "util/string_util.h"
|
||||
#include "util/testharness.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace rocksdb {
|
||||
|
||||
// Conversions between numeric keys/values and the types expected by Cache.
|
||||
static std::string EncodeKey(int k) {
|
||||
@ -41,7 +39,6 @@ static int DecodeValue(void* v) {
|
||||
|
||||
const std::string kLRU = "lru";
|
||||
const std::string kClock = "clock";
|
||||
const std::string kFast = "fast";
|
||||
|
||||
void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
|
||||
|
||||
@ -86,32 +83,17 @@ class CacheTest : public testing::TestWithParam<std::string> {
|
||||
if (type == kClock) {
|
||||
return NewClockCache(capacity);
|
||||
}
|
||||
if (type == kFast) {
|
||||
return NewFastLRUCache(capacity);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<Cache> NewCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
|
||||
std::shared_ptr<Cache> NewCache(size_t capacity, int num_shard_bits,
|
||||
bool strict_capacity_limit) {
|
||||
auto type = GetParam();
|
||||
if (type == kLRU) {
|
||||
LRUCacheOptions co;
|
||||
co.capacity = capacity;
|
||||
co.num_shard_bits = num_shard_bits;
|
||||
co.strict_capacity_limit = strict_capacity_limit;
|
||||
co.high_pri_pool_ratio = 0;
|
||||
co.metadata_charge_policy = charge_policy;
|
||||
return NewLRUCache(co);
|
||||
return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
|
||||
}
|
||||
if (type == kClock) {
|
||||
return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
|
||||
charge_policy);
|
||||
}
|
||||
if (type == kFast) {
|
||||
return NewFastLRUCache(capacity, num_shard_bits, strict_capacity_limit,
|
||||
charge_policy);
|
||||
return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@ -127,8 +109,8 @@ class CacheTest : public testing::TestWithParam<std::string> {
|
||||
|
||||
void Insert(std::shared_ptr<Cache> cache, int key, int value,
|
||||
int charge = 1) {
|
||||
EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
|
||||
&CacheTest::Deleter));
|
||||
cache->Insert(EncodeKey(key), EncodeValue(value), charge,
|
||||
&CacheTest::Deleter);
|
||||
}
|
||||
|
||||
void Erase(std::shared_ptr<Cache> cache, int key) {
|
||||
@ -161,15 +143,10 @@ class CacheTest : public testing::TestWithParam<std::string> {
|
||||
};
|
||||
CacheTest* CacheTest::current_;
|
||||
|
||||
class LRUCacheTest : public CacheTest {};
|
||||
|
||||
TEST_P(CacheTest, UsageTest) {
|
||||
// cache is std::shared_ptr and will be automatically cleaned up.
|
||||
const uint64_t kCapacity = 100000;
|
||||
auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
|
||||
auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
|
||||
ASSERT_EQ(0, cache->GetUsage());
|
||||
ASSERT_EQ(0, precise_cache->GetUsage());
|
||||
auto cache = NewCache(kCapacity, 8, false);
|
||||
|
||||
size_t usage = 0;
|
||||
char value[10] = "abcdef";
|
||||
@ -177,47 +154,32 @@ TEST_P(CacheTest, UsageTest) {
|
||||
for (int i = 1; i < 100; ++i) {
|
||||
std::string key(i, 'a');
|
||||
auto kv_size = key.size() + 5;
|
||||
ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
|
||||
dumbDeleter));
|
||||
ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
|
||||
kv_size, dumbDeleter));
|
||||
cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
|
||||
usage += kv_size;
|
||||
ASSERT_EQ(usage, cache->GetUsage());
|
||||
ASSERT_LT(usage, precise_cache->GetUsage());
|
||||
}
|
||||
|
||||
cache->EraseUnRefEntries();
|
||||
precise_cache->EraseUnRefEntries();
|
||||
ASSERT_EQ(0, cache->GetUsage());
|
||||
ASSERT_EQ(0, precise_cache->GetUsage());
|
||||
|
||||
// make sure the cache will be overloaded
|
||||
for (uint64_t i = 1; i < kCapacity; ++i) {
|
||||
auto key = std::to_string(i);
|
||||
ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
|
||||
dumbDeleter));
|
||||
ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
|
||||
key.size() + 5, dumbDeleter));
|
||||
auto key = ToString(i);
|
||||
cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
|
||||
dumbDeleter);
|
||||
}
|
||||
|
||||
// the usage should be close to the capacity
|
||||
ASSERT_GT(kCapacity, cache->GetUsage());
|
||||
ASSERT_GT(kCapacity, precise_cache->GetUsage());
|
||||
ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
|
||||
ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, PinnedUsageTest) {
|
||||
// cache is std::shared_ptr and will be automatically cleaned up.
|
||||
const uint64_t kCapacity = 200000;
|
||||
auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
|
||||
auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
|
||||
const uint64_t kCapacity = 100000;
|
||||
auto cache = NewCache(kCapacity, 8, false);
|
||||
|
||||
size_t pinned_usage = 0;
|
||||
char value[10] = "abcdef";
|
||||
|
||||
std::forward_list<Cache::Handle*> unreleased_handles;
|
||||
std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;
|
||||
|
||||
// Add entries. Unpin some of them after insertion. Then, pin some of them
|
||||
// again. Check GetPinnedUsage().
|
||||
@ -225,73 +187,40 @@ TEST_P(CacheTest, PinnedUsageTest) {
|
||||
std::string key(i, 'a');
|
||||
auto kv_size = key.size() + 5;
|
||||
Cache::Handle* handle;
|
||||
Cache::Handle* handle_in_precise_cache;
|
||||
ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
|
||||
dumbDeleter, &handle));
|
||||
assert(handle);
|
||||
ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
|
||||
kv_size, dumbDeleter,
|
||||
&handle_in_precise_cache));
|
||||
assert(handle_in_precise_cache);
|
||||
cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
|
||||
&handle);
|
||||
pinned_usage += kv_size;
|
||||
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
|
||||
ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
|
||||
if (i % 2 == 0) {
|
||||
cache->Release(handle);
|
||||
precise_cache->Release(handle_in_precise_cache);
|
||||
pinned_usage -= kv_size;
|
||||
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
|
||||
ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
|
||||
} else {
|
||||
unreleased_handles.push_front(handle);
|
||||
unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
|
||||
}
|
||||
if (i % 3 == 0) {
|
||||
unreleased_handles.push_front(cache->Lookup(key));
|
||||
auto x = precise_cache->Lookup(key);
|
||||
assert(x);
|
||||
unreleased_handles_in_precise_cache.push_front(x);
|
||||
// If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
|
||||
// usage increased
|
||||
if (i % 2 == 0) {
|
||||
pinned_usage += kv_size;
|
||||
}
|
||||
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
|
||||
ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
|
||||
}
|
||||
}
|
||||
auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
|
||||
ASSERT_LT(pinned_usage, precise_cache_pinned_usage);
|
||||
|
||||
// check that overloading the cache does not change the pinned usage
|
||||
for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
|
||||
auto key = std::to_string(i);
|
||||
ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
|
||||
dumbDeleter));
|
||||
ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
|
||||
key.size() + 5, dumbDeleter));
|
||||
auto key = ToString(i);
|
||||
cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
|
||||
dumbDeleter);
|
||||
}
|
||||
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
|
||||
ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());
|
||||
|
||||
cache->EraseUnRefEntries();
|
||||
precise_cache->EraseUnRefEntries();
|
||||
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
|
||||
ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());
|
||||
|
||||
// release handles for pinned entries to prevent memory leaks
|
||||
for (auto handle : unreleased_handles) {
|
||||
cache->Release(handle);
|
||||
}
|
||||
for (auto handle : unreleased_handles_in_precise_cache) {
|
||||
precise_cache->Release(handle);
|
||||
}
|
||||
ASSERT_EQ(0, cache->GetPinnedUsage());
|
||||
ASSERT_EQ(0, precise_cache->GetPinnedUsage());
|
||||
cache->EraseUnRefEntries();
|
||||
precise_cache->EraseUnRefEntries();
|
||||
ASSERT_EQ(0, cache->GetUsage());
|
||||
ASSERT_EQ(0, precise_cache->GetUsage());
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, HitAndMiss) {
|
||||
@ -377,7 +306,7 @@ TEST_P(CacheTest, EvictionPolicy) {
|
||||
Insert(200, 201);
|
||||
|
||||
// Frequently used entry must be kept around
|
||||
for (int i = 0; i < kCacheSize * 2; i++) {
|
||||
for (int i = 0; i < kCacheSize + 200; i++) {
|
||||
Insert(1000+i, 2000+i);
|
||||
ASSERT_EQ(101, Lookup(100));
|
||||
}
|
||||
@ -430,7 +359,7 @@ TEST_P(CacheTest, EvictionPolicyRef) {
|
||||
Insert(303, 104);
|
||||
|
||||
// Insert entries much more than Cache capacity
|
||||
for (int i = 0; i < kCacheSize * 2; i++) {
|
||||
for (int i = 0; i < kCacheSize + 200; i++) {
|
||||
Insert(1000 + i, 2000 + i);
|
||||
}
|
||||
|
||||
@ -585,7 +514,7 @@ TEST_P(CacheTest, SetCapacity) {
|
||||
std::vector<Cache::Handle*> handles(10);
|
||||
// Insert 5 entries, but not releasing.
|
||||
for (size_t i = 0; i < 5; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i+1);
|
||||
Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
|
||||
ASSERT_TRUE(s.ok());
|
||||
}
|
||||
@ -600,7 +529,7 @@ TEST_P(CacheTest, SetCapacity) {
|
||||
// then decrease capacity to 7, final capacity should be 7
|
||||
// and usage should be 7
|
||||
for (size_t i = 5; i < 10; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i+1);
|
||||
Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
|
||||
ASSERT_TRUE(s.ok());
|
||||
}
|
||||
@ -619,24 +548,20 @@ TEST_P(CacheTest, SetCapacity) {
|
||||
for (size_t i = 5; i < 10; i++) {
|
||||
cache->Release(handles[i]);
|
||||
}
|
||||
|
||||
// Make sure this doesn't crash or upset ASAN/valgrind
|
||||
cache->DisownData();
|
||||
}
|
||||
|
||||
TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
|
||||
TEST_P(CacheTest, SetStrictCapacityLimit) {
|
||||
// test1: set the flag to false. Insert more keys than capacity. See if they
|
||||
// all go through.
|
||||
std::shared_ptr<Cache> cache = NewCache(5, 0, false);
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(5, 0, false);
|
||||
std::vector<Cache::Handle*> handles(10);
|
||||
Status s;
|
||||
for (size_t i = 0; i < 10; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i + 1);
|
||||
s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
|
||||
ASSERT_OK(s);
|
||||
ASSERT_NE(nullptr, handles[i]);
|
||||
}
|
||||
ASSERT_EQ(10, cache->GetUsage());
|
||||
|
||||
// test2: set the flag to true. Insert and check if it fails.
|
||||
std::string extra_key = "extra";
|
||||
@ -646,16 +571,15 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
|
||||
s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
|
||||
ASSERT_TRUE(s.IsIncomplete());
|
||||
ASSERT_EQ(nullptr, handle);
|
||||
ASSERT_EQ(10, cache->GetUsage());
|
||||
|
||||
for (size_t i = 0; i < 10; i++) {
|
||||
cache->Release(handles[i]);
|
||||
}
|
||||
|
||||
// test3: init with flag being true.
|
||||
std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
|
||||
std::shared_ptr<Cache> cache2 = NewLRUCache(5, 0, true);
|
||||
for (size_t i = 0; i < 5; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i + 1);
|
||||
s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
|
||||
ASSERT_OK(s);
|
||||
ASSERT_NE(nullptr, handles[i]);
|
||||
@ -667,7 +591,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
|
||||
s = cache2->Insert(extra_key, extra_value, 1, &deleter);
|
||||
// AS if the key have been inserted into cache but get evicted immediately.
|
||||
ASSERT_OK(s);
|
||||
ASSERT_EQ(5, cache2->GetUsage());
|
||||
ASSERT_EQ(5, cache->GetUsage());
|
||||
ASSERT_EQ(nullptr, cache2->Lookup(extra_key));
|
||||
|
||||
for (size_t i = 0; i < 5; i++) {
|
||||
@ -685,14 +609,14 @@ TEST_P(CacheTest, OverCapacity) {
|
||||
|
||||
// Insert n+1 entries, but not releasing.
|
||||
for (size_t i = 0; i < n + 1; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i+1);
|
||||
Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
|
||||
ASSERT_TRUE(s.ok());
|
||||
}
|
||||
|
||||
// Guess what's in the cache now?
|
||||
for (size_t i = 0; i < n + 1; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i+1);
|
||||
auto h = cache->Lookup(key);
|
||||
ASSERT_TRUE(h != nullptr);
|
||||
if (h) cache->Release(h);
|
||||
@ -713,7 +637,7 @@ TEST_P(CacheTest, OverCapacity) {
|
||||
// This is consistent with the LRU policy since the element 0
|
||||
// was released first
|
||||
for (size_t i = 0; i < n + 1; i++) {
|
||||
std::string key = std::to_string(i + 1);
|
||||
std::string key = ToString(i+1);
|
||||
auto h = cache->Lookup(key);
|
||||
if (h) {
|
||||
ASSERT_NE(i, 0U);
|
||||
@ -725,98 +649,25 @@ TEST_P(CacheTest, OverCapacity) {
|
||||
}
|
||||
|
||||
namespace {
|
||||
std::vector<std::pair<int, int>> legacy_callback_state;
|
||||
void legacy_callback(void* value, size_t charge) {
|
||||
legacy_callback_state.push_back(
|
||||
{DecodeValue(value), static_cast<int>(charge)});
|
||||
std::vector<std::pair<int, int>> callback_state;
|
||||
void callback(void* entry, size_t charge) {
|
||||
callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
|
||||
TEST_P(CacheTest, ApplyToAllCacheEntiresTest) {
|
||||
std::vector<std::pair<int, int>> inserted;
|
||||
legacy_callback_state.clear();
|
||||
callback_state.clear();
|
||||
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
Insert(i, i * 2, i + 1);
|
||||
inserted.push_back({i * 2, i + 1});
|
||||
}
|
||||
cache_->ApplyToAllCacheEntries(legacy_callback, true);
|
||||
|
||||
std::sort(inserted.begin(), inserted.end());
|
||||
std::sort(legacy_callback_state.begin(), legacy_callback_state.end());
|
||||
ASSERT_EQ(inserted.size(), legacy_callback_state.size());
|
||||
for (size_t i = 0; i < inserted.size(); ++i) {
|
||||
EXPECT_EQ(inserted[i], legacy_callback_state[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, ApplyToAllEntriesTest) {
|
||||
std::vector<std::string> callback_state;
|
||||
const auto callback = [&](const Slice& key, void* value, size_t charge,
|
||||
Cache::DeleterFn deleter) {
|
||||
callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
|
||||
std::to_string(DecodeValue(value)) + "," +
|
||||
std::to_string(charge));
|
||||
assert(deleter == &CacheTest::Deleter);
|
||||
};
|
||||
|
||||
std::vector<std::string> inserted;
|
||||
callback_state.clear();
|
||||
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
Insert(i, i * 2, i + 1);
|
||||
inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
|
||||
std::to_string(i + 1));
|
||||
}
|
||||
cache_->ApplyToAllEntries(callback, /*opts*/ {});
|
||||
cache_->ApplyToAllCacheEntries(callback, true);
|
||||
|
||||
std::sort(inserted.begin(), inserted.end());
|
||||
std::sort(callback_state.begin(), callback_state.end());
|
||||
ASSERT_EQ(inserted.size(), callback_state.size());
|
||||
for (size_t i = 0; i < inserted.size(); ++i) {
|
||||
EXPECT_EQ(inserted[i], callback_state[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
|
||||
// This is a mini-stress test of ApplyToAllEntries, to ensure
|
||||
// items in the cache that are neither added nor removed
|
||||
// during ApplyToAllEntries are counted exactly once.
|
||||
|
||||
// Insert some entries that we expect to be seen exactly once
|
||||
// during iteration.
|
||||
constexpr int kSpecialCharge = 2;
|
||||
constexpr int kNotSpecialCharge = 1;
|
||||
constexpr int kSpecialCount = 100;
|
||||
for (int i = 0; i < kSpecialCount; ++i) {
|
||||
Insert(i, i * 2, kSpecialCharge);
|
||||
}
|
||||
|
||||
// For callback
|
||||
int special_count = 0;
|
||||
const auto callback = [&](const Slice&, void*, size_t charge,
|
||||
Cache::DeleterFn) {
|
||||
if (charge == static_cast<size_t>(kSpecialCharge)) {
|
||||
++special_count;
|
||||
}
|
||||
};
|
||||
|
||||
// Start counting
|
||||
std::thread apply_thread([&]() {
|
||||
// Use small average_entries_per_lock to make the problem difficult
|
||||
Cache::ApplyToAllEntriesOptions opts;
|
||||
opts.average_entries_per_lock = 2;
|
||||
cache_->ApplyToAllEntries(callback, opts);
|
||||
});
|
||||
|
||||
// In parallel, add more entries, enough to cause resize but not enough
|
||||
// to cause ejections
|
||||
for (int i = kSpecialCount * 1; i < kSpecialCount * 6; ++i) {
|
||||
Insert(i, i * 2, kNotSpecialCharge);
|
||||
}
|
||||
|
||||
apply_thread.join();
|
||||
ASSERT_EQ(special_count, kSpecialCount);
|
||||
ASSERT_TRUE(inserted == callback_state);
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, DefaultShardBits) {
|
||||
@ -835,28 +686,16 @@ TEST_P(CacheTest, DefaultShardBits) {
|
||||
ASSERT_EQ(6, sc->GetNumShardBits());
|
||||
}
|
||||
|
||||
TEST_P(CacheTest, GetChargeAndDeleter) {
|
||||
Insert(1, 2);
|
||||
Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
|
||||
ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
|
||||
ASSERT_EQ(1, cache_->GetCharge(h1));
|
||||
ASSERT_EQ(&CacheTest::Deleter, cache_->GetDeleter(h1));
|
||||
cache_->Release(h1);
|
||||
}
|
||||
|
||||
#ifdef SUPPORT_CLOCK_CACHE
|
||||
std::shared_ptr<Cache> (*new_clock_cache_func)(
|
||||
size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
|
||||
std::shared_ptr<Cache> (*new_clock_cache_func)(size_t, int,
|
||||
bool) = NewClockCache;
|
||||
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
|
||||
testing::Values(kLRU, kClock, kFast));
|
||||
testing::Values(kLRU, kClock));
|
||||
#else
|
||||
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
|
||||
testing::Values(kLRU, kFast));
|
||||
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
|
||||
#endif // SUPPORT_CLOCK_CACHE
|
||||
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest,
|
||||
testing::Values(kLRU, kFast));
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
|
231
cache/clock_cache.cc
vendored
231
cache/clock_cache.cc
vendored
@ -11,16 +11,15 @@
|
||||
|
||||
#ifndef SUPPORT_CLOCK_CACHE
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace rocksdb {
|
||||
|
||||
std::shared_ptr<Cache> NewClockCache(
|
||||
size_t /*capacity*/, int /*num_shard_bits*/, bool /*strict_capacity_limit*/,
|
||||
CacheMetadataChargePolicy /*metadata_charge_policy*/) {
|
||||
std::shared_ptr<Cache> NewClockCache(size_t /*capacity*/, int /*num_shard_bits*/,
|
||||
bool /*strict_capacity_limit*/) {
|
||||
// Clock cache not supported.
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
||||
#else
|
||||
|
||||
@ -33,15 +32,14 @@ std::shared_ptr<Cache> NewClockCache(
|
||||
#ifndef ROCKSDB_USE_RTTI
|
||||
#define TBB_USE_EXCEPTIONS 0
|
||||
#endif
|
||||
#include "cache/sharded_cache.h"
|
||||
#include "port/lang.h"
|
||||
#include "port/malloc.h"
|
||||
#include "port/port.h"
|
||||
#include "tbb/concurrent_hash_map.h"
|
||||
|
||||
#include "cache/sharded_cache.h"
|
||||
#include "port/port.h"
|
||||
#include "util/autovector.h"
|
||||
#include "util/mutexlock.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace rocksdb {
|
||||
|
||||
namespace {
|
||||
|
||||
@ -176,16 +174,13 @@ namespace {
|
||||
// Cache entry meta data.
|
||||
struct CacheHandle {
|
||||
Slice key;
|
||||
uint32_t hash;
|
||||
void* value;
|
||||
size_t charge;
|
||||
Cache::DeleterFn deleter;
|
||||
uint32_t hash;
|
||||
|
||||
// Addition to "charge" to get "total charge" under metadata policy.
|
||||
uint32_t meta_charge;
|
||||
void (*deleter)(const Slice&, void* value);
|
||||
|
||||
// Flags and counters associated with the cache handle:
|
||||
// lowest bit: in-cache bit
|
||||
// lowest bit: n-cache bit
|
||||
// second lowest bit: usage bit
|
||||
// the rest bits: reference count
|
||||
// The handle is unused when flags equals to 0. The thread decreases the count
|
||||
@ -207,43 +202,25 @@ struct CacheHandle {
|
||||
deleter = a.deleter;
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline static uint32_t CalcMetadataCharge(
|
||||
Slice key, CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
size_t meta_charge = 0;
|
||||
if (metadata_charge_policy == kFullChargeCacheMetadata) {
|
||||
meta_charge += sizeof(CacheHandle);
|
||||
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
|
||||
meta_charge +=
|
||||
malloc_usable_size(static_cast<void*>(const_cast<char*>(key.data())));
|
||||
#else
|
||||
meta_charge += key.size();
|
||||
#endif
|
||||
}
|
||||
assert(meta_charge <= UINT32_MAX);
|
||||
return static_cast<uint32_t>(meta_charge);
|
||||
}
|
||||
|
||||
inline size_t GetTotalCharge() { return charge + meta_charge; }
|
||||
};
|
||||
|
||||
// Key of hash map. We store hash value with the key for convenience.
|
||||
struct ClockCacheKey {
|
||||
struct CacheKey {
|
||||
Slice key;
|
||||
uint32_t hash_value;
|
||||
|
||||
ClockCacheKey() = default;
|
||||
CacheKey() = default;
|
||||
|
||||
ClockCacheKey(const Slice& k, uint32_t h) {
|
||||
CacheKey(const Slice& k, uint32_t h) {
|
||||
key = k;
|
||||
hash_value = h;
|
||||
}
|
||||
|
||||
static bool equal(const ClockCacheKey& a, const ClockCacheKey& b) {
|
||||
static bool equal(const CacheKey& a, const CacheKey& b) {
|
||||
return a.hash_value == b.hash_value && a.key == b.key;
|
||||
}
|
||||
|
||||
static size_t hash(const ClockCacheKey& a) {
|
||||
static size_t hash(const CacheKey& a) {
|
||||
return static_cast<size_t>(a.hash_value);
|
||||
}
|
||||
};
|
||||
@ -260,8 +237,7 @@ struct CleanupContext {
|
||||
class ClockCacheShard final : public CacheShard {
|
||||
public:
|
||||
// Hash map type.
|
||||
using HashTable =
|
||||
tbb::concurrent_hash_map<ClockCacheKey, CacheHandle*, ClockCacheKey>;
|
||||
typedef tbb::concurrent_hash_map<CacheKey, CacheHandle*, CacheKey> HashTable;
|
||||
|
||||
ClockCacheShard();
|
||||
~ClockCacheShard() override;
|
||||
@ -272,42 +248,21 @@ class ClockCacheShard final : public CacheShard {
|
||||
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
Cache::Handle** handle, Cache::Priority priority) override;
|
||||
Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
const Cache::CacheItemHelper* helper, size_t charge,
|
||||
Cache::Handle** handle, Cache::Priority priority) override {
|
||||
return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
|
||||
}
|
||||
Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
|
||||
Cache::Handle* Lookup(const Slice& key, uint32_t hash,
|
||||
const Cache::CacheItemHelper* /*helper*/,
|
||||
const Cache::CreateCallback& /*create_cb*/,
|
||||
Cache::Priority /*priority*/, bool /*wait*/,
|
||||
Statistics* /*stats*/) override {
|
||||
return Lookup(key, hash);
|
||||
}
|
||||
bool Release(Cache::Handle* handle, bool /*useful*/,
|
||||
bool erase_if_last_ref) override {
|
||||
return Release(handle, erase_if_last_ref);
|
||||
}
|
||||
bool IsReady(Cache::Handle* /*handle*/) override { return true; }
|
||||
void Wait(Cache::Handle* /*handle*/) override {}
|
||||
|
||||
// If the entry in in cache, increase reference count and return true.
|
||||
// Return false otherwise.
|
||||
//
|
||||
// Not necessary to hold mutex_ before being called.
|
||||
bool Ref(Cache::Handle* handle) override;
|
||||
bool Release(Cache::Handle* handle, bool erase_if_last_ref = false) override;
|
||||
bool Release(Cache::Handle* handle, bool force_erase = false) override;
|
||||
void Erase(const Slice& key, uint32_t hash) override;
|
||||
bool EraseAndConfirm(const Slice& key, uint32_t hash,
|
||||
CleanupContext* context);
|
||||
size_t GetUsage() const override;
|
||||
size_t GetPinnedUsage() const override;
|
||||
void EraseUnRefEntries() override;
|
||||
void ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) override;
|
||||
void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) override;
|
||||
|
||||
private:
|
||||
static const uint32_t kInCacheBit = 1;
|
||||
@ -363,8 +318,7 @@ class ClockCacheShard final : public CacheShard {
|
||||
CacheHandle* Insert(const Slice& key, uint32_t hash, void* value,
|
||||
size_t change,
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
bool hold_reference, CleanupContext* context,
|
||||
bool* overwritten);
|
||||
bool hold_reference, CleanupContext* context);
|
||||
|
||||
// Guards list_, head_, and recycle_. In addition, updating table_ also has
|
||||
// to hold the mutex, to avoid the cache being in inconsistent state.
|
||||
@ -426,46 +380,22 @@ size_t ClockCacheShard::GetPinnedUsage() const {
|
||||
return pinned_usage_.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void ClockCacheShard::ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) {
|
||||
assert(average_entries_per_lock > 0);
|
||||
MutexLock lock(&mutex_);
|
||||
|
||||
// Figure out the range to iterate, update `state`
|
||||
size_t list_size = list_.size();
|
||||
size_t start_idx = *state;
|
||||
size_t end_idx = start_idx + average_entries_per_lock;
|
||||
if (start_idx > list_size) {
|
||||
// Shouldn't reach here, but recoverable
|
||||
assert(false);
|
||||
// Mark finished with all
|
||||
*state = UINT32_MAX;
|
||||
return;
|
||||
void ClockCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) {
|
||||
if (thread_safe) {
|
||||
mutex_.Lock();
|
||||
}
|
||||
if (end_idx >= list_size || end_idx >= UINT32_MAX) {
|
||||
// This also includes the hypothetical case of >4 billion
|
||||
// cache handles.
|
||||
end_idx = list_size;
|
||||
// Mark finished with all
|
||||
*state = UINT32_MAX;
|
||||
} else {
|
||||
*state = static_cast<uint32_t>(end_idx);
|
||||
}
|
||||
|
||||
// Do the iteration
|
||||
auto cur = list_.begin() + start_idx;
|
||||
auto end = list_.begin() + end_idx;
|
||||
for (; cur != end; ++cur) {
|
||||
const CacheHandle& handle = *cur;
|
||||
// Use relaxed semantics instead of acquire semantics since we are
|
||||
// holding mutex
|
||||
for (auto& handle : list_) {
|
||||
// Use relaxed semantics instead of acquire semantics since we are either
|
||||
// holding mutex, or don't have thread safe requirement.
|
||||
uint32_t flags = handle.flags.load(std::memory_order_relaxed);
|
||||
if (InCache(flags)) {
|
||||
callback(handle.key, handle.value, handle.charge, handle.deleter);
|
||||
callback(handle.value, handle.charge);
|
||||
}
|
||||
}
|
||||
if (thread_safe) {
|
||||
mutex_.Unlock();
|
||||
}
|
||||
}
|
||||
|
||||
void ClockCacheShard::RecycleHandle(CacheHandle* handle,
|
||||
@ -474,10 +404,11 @@ void ClockCacheShard::RecycleHandle(CacheHandle* handle,
|
||||
assert(!InCache(handle->flags) && CountRefs(handle->flags) == 0);
|
||||
context->to_delete_key.push_back(handle->key.data());
|
||||
context->to_delete_value.emplace_back(*handle);
|
||||
size_t total_charge = handle->GetTotalCharge();
|
||||
// clearing `handle` fields would go here but not strictly required
|
||||
handle->key.clear();
|
||||
handle->value = nullptr;
|
||||
handle->deleter = nullptr;
|
||||
recycle_.push_back(handle);
|
||||
usage_.fetch_sub(total_charge, std::memory_order_relaxed);
|
||||
usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void ClockCacheShard::Cleanup(const CleanupContext& context) {
|
||||
@ -503,8 +434,7 @@ bool ClockCacheShard::Ref(Cache::Handle* h) {
|
||||
std::memory_order_relaxed)) {
|
||||
if (CountRefs(flags) == 0) {
|
||||
// No reference count before the operation.
|
||||
size_t total_charge = handle->GetTotalCharge();
|
||||
pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
|
||||
pinned_usage_.fetch_add(handle->charge, std::memory_order_relaxed);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -517,11 +447,6 @@ bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
|
||||
if (set_usage) {
|
||||
handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
|
||||
}
|
||||
// If the handle reaches state refs=0 and InCache=true after this
|
||||
// atomic operation then we cannot access `handle` afterward, because
|
||||
// it could be evicted before we access the `handle`.
|
||||
size_t total_charge = handle->GetTotalCharge();
|
||||
|
||||
// Use acquire-release semantics as previous operations on the cache entry
|
||||
// has to be order before reference count is decreased, and potential cleanup
|
||||
// of the entry has to be order after.
|
||||
@ -529,7 +454,7 @@ bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
|
||||
assert(CountRefs(flags) > 0);
|
||||
if (CountRefs(flags) == 1) {
|
||||
// this is the last reference.
|
||||
pinned_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
|
||||
pinned_usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
|
||||
// Cleanup if it is the last reference.
|
||||
if (!InCache(flags)) {
|
||||
MutexLock l(&mutex_);
|
||||
@ -560,7 +485,7 @@ bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
|
||||
if (handle->flags.compare_exchange_strong(flags, 0, std::memory_order_acquire,
|
||||
std::memory_order_relaxed)) {
|
||||
bool erased __attribute__((__unused__)) =
|
||||
table_.erase(ClockCacheKey(handle->key, handle->hash));
|
||||
table_.erase(CacheKey(handle->key, handle->hash));
|
||||
assert(erased);
|
||||
RecycleHandle(handle, context);
|
||||
return true;
|
||||
@ -613,13 +538,9 @@ void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
|
||||
CacheHandle* ClockCacheShard::Insert(
|
||||
const Slice& key, uint32_t hash, void* value, size_t charge,
|
||||
void (*deleter)(const Slice& key, void* value), bool hold_reference,
|
||||
CleanupContext* context, bool* overwritten) {
|
||||
assert(overwritten != nullptr && *overwritten == false);
|
||||
uint32_t meta_charge =
|
||||
CacheHandle::CalcMetadataCharge(key, metadata_charge_policy_);
|
||||
size_t total_charge = charge + meta_charge;
|
||||
CleanupContext* context) {
|
||||
MutexLock l(&mutex_);
|
||||
bool success = EvictFromCache(total_charge, context);
|
||||
bool success = EvictFromCache(charge, context);
|
||||
bool strict = strict_capacity_limit_.load(std::memory_order_relaxed);
|
||||
if (!success && (strict || !hold_reference)) {
|
||||
context->to_delete_key.push_back(key.data());
|
||||
@ -643,31 +564,20 @@ CacheHandle* ClockCacheShard::Insert(
|
||||
handle->hash = hash;
|
||||
handle->value = value;
|
||||
handle->charge = charge;
|
||||
handle->meta_charge = meta_charge;
|
||||
handle->deleter = deleter;
|
||||
uint32_t flags = hold_reference ? kInCacheBit + kOneRef : kInCacheBit;
|
||||
|
||||
// TODO investigate+fix suspected race condition:
|
||||
// [thread 1] Lookup starts, up to Ref()
|
||||
// [thread 2] Erase/evict the entry just looked up
|
||||
// [thread 1] Ref() the handle, even though it's in the recycle bin
|
||||
// [thread 2] Insert with recycling that handle
|
||||
// Here we obliterate the other thread's Ref
|
||||
// Possible fix: never blindly overwrite the flags, but only make
|
||||
// relative updates (fetch_add, etc).
|
||||
handle->flags.store(flags, std::memory_order_relaxed);
|
||||
HashTable::accessor accessor;
|
||||
if (table_.find(accessor, ClockCacheKey(key, hash))) {
|
||||
*overwritten = true;
|
||||
if (table_.find(accessor, CacheKey(key, hash))) {
|
||||
CacheHandle* existing_handle = accessor->second;
|
||||
table_.erase(accessor);
|
||||
UnsetInCache(existing_handle, context);
|
||||
}
|
||||
table_.insert(HashTable::value_type(ClockCacheKey(key, hash), handle));
|
||||
table_.insert(HashTable::value_type(CacheKey(key, hash), handle));
|
||||
if (hold_reference) {
|
||||
pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
|
||||
pinned_usage_.fetch_add(charge, std::memory_order_relaxed);
|
||||
}
|
||||
usage_.fetch_add(total_charge, std::memory_order_relaxed);
|
||||
usage_.fetch_add(charge, std::memory_order_relaxed);
|
||||
return handle;
|
||||
}
|
||||
|
||||
@ -681,28 +591,23 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
|
||||
char* key_data = new char[key.size()];
|
||||
memcpy(key_data, key.data(), key.size());
|
||||
Slice key_copy(key_data, key.size());
|
||||
bool overwritten = false;
|
||||
CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter,
|
||||
out_handle != nullptr, &context, &overwritten);
|
||||
out_handle != nullptr, &context);
|
||||
Status s;
|
||||
if (out_handle != nullptr) {
|
||||
if (handle == nullptr) {
|
||||
s = Status::Incomplete("Insert failed due to CLOCK cache being full.");
|
||||
s = Status::Incomplete("Insert failed due to LRU cache being full.");
|
||||
} else {
|
||||
*out_handle = reinterpret_cast<Cache::Handle*>(handle);
|
||||
}
|
||||
}
|
||||
if (overwritten) {
|
||||
assert(s.ok());
|
||||
s = Status::OkOverwritten();
|
||||
}
|
||||
Cleanup(context);
|
||||
return s;
|
||||
}
|
||||
|
||||
Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
|
||||
HashTable::const_accessor accessor;
|
||||
if (!table_.find(accessor, ClockCacheKey(key, hash))) {
|
||||
if (!table_.find(accessor, CacheKey(key, hash))) {
|
||||
return nullptr;
|
||||
}
|
||||
CacheHandle* handle = accessor->second;
|
||||
@ -725,11 +630,11 @@ Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
|
||||
return reinterpret_cast<Cache::Handle*>(handle);
|
||||
}
|
||||
|
||||
bool ClockCacheShard::Release(Cache::Handle* h, bool erase_if_last_ref) {
|
||||
bool ClockCacheShard::Release(Cache::Handle* h, bool force_erase) {
|
||||
CleanupContext context;
|
||||
CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
|
||||
bool erased = Unref(handle, true, &context);
|
||||
if (erase_if_last_ref && !erased) {
|
||||
if (force_erase && !erased) {
|
||||
erased = EraseAndConfirm(handle->key, handle->hash, &context);
|
||||
}
|
||||
Cleanup(context);
|
||||
@ -747,7 +652,7 @@ bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
|
||||
MutexLock l(&mutex_);
|
||||
HashTable::accessor accessor;
|
||||
bool erased = false;
|
||||
if (table_.find(accessor, ClockCacheKey(key, hash))) {
|
||||
if (table_.find(accessor, CacheKey(key, hash))) {
|
||||
CacheHandle* handle = accessor->second;
|
||||
table_.erase(accessor);
|
||||
erased = UnsetInCache(handle, context);
|
||||
@ -769,14 +674,10 @@ void ClockCacheShard::EraseUnRefEntries() {
|
||||
|
||||
class ClockCache final : public ShardedCache {
|
||||
public:
|
||||
ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy)
|
||||
ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit)
|
||||
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
|
||||
int num_shards = 1 << num_shard_bits;
|
||||
shards_ = new ClockCacheShard[num_shards];
|
||||
for (int i = 0; i < num_shards; i++) {
|
||||
shards_[i].set_metadata_charge_policy(metadata_charge_policy);
|
||||
}
|
||||
SetCapacity(capacity);
|
||||
SetStrictCapacityLimit(strict_capacity_limit);
|
||||
}
|
||||
@ -785,11 +686,11 @@ class ClockCache final : public ShardedCache {
|
||||
|
||||
const char* Name() const override { return "ClockCache"; }
|
||||
|
||||
CacheShard* GetShard(uint32_t shard) override {
|
||||
CacheShard* GetShard(int shard) override {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
const CacheShard* GetShard(uint32_t shard) const override {
|
||||
const CacheShard* GetShard(int shard) const override {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
@ -805,18 +706,7 @@ class ClockCache final : public ShardedCache {
|
||||
return reinterpret_cast<const CacheHandle*>(handle)->hash;
|
||||
}
|
||||
|
||||
DeleterFn GetDeleter(Handle* handle) const override {
|
||||
return reinterpret_cast<const CacheHandle*>(handle)->deleter;
|
||||
}
|
||||
|
||||
void DisownData() override {
|
||||
// Leak data only if that won't generate an ASAN/valgrind warning
|
||||
if (!kMustFreeHeapAllocations) {
|
||||
shards_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void WaitAll(std::vector<Handle*>& /*handles*/) override {}
|
||||
void DisownData() override { shards_ = nullptr; }
|
||||
|
||||
private:
|
||||
ClockCacheShard* shards_;
|
||||
@ -824,16 +714,15 @@ class ClockCache final : public ShardedCache {
|
||||
|
||||
} // end anonymous namespace
|
||||
|
||||
std::shared_ptr<Cache> NewClockCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
|
||||
bool strict_capacity_limit) {
|
||||
if (num_shard_bits < 0) {
|
||||
num_shard_bits = GetDefaultCacheShardBits(capacity);
|
||||
}
|
||||
return std::make_shared<ClockCache>(
|
||||
capacity, num_shard_bits, strict_capacity_limit, metadata_charge_policy);
|
||||
return std::make_shared<ClockCache>(capacity, num_shard_bits,
|
||||
strict_capacity_limit);
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
||||
#endif // SUPPORT_CLOCK_CACHE
|
||||
|
171
cache/compressed_secondary_cache.cc
vendored
171
cache/compressed_secondary_cache.cc
vendored
@ -1,171 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "cache/compressed_secondary_cache.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "memory/memory_allocator.h"
|
||||
#include "util/compression.h"
|
||||
#include "util/string_util.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
namespace {
|
||||
|
||||
void DeletionCallback(const Slice& /*key*/, void* obj) {
|
||||
delete reinterpret_cast<CacheAllocationPtr*>(obj);
|
||||
obj = nullptr;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
CompressedSecondaryCache::CompressedSecondaryCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
CompressionType compression_type, uint32_t compress_format_version)
|
||||
: cache_options_(capacity, num_shard_bits, strict_capacity_limit,
|
||||
high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
|
||||
metadata_charge_policy, compression_type,
|
||||
compress_format_version) {
|
||||
cache_ = NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
|
||||
high_pri_pool_ratio, memory_allocator,
|
||||
use_adaptive_mutex, metadata_charge_policy);
|
||||
}
|
||||
|
||||
CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
|
||||
|
||||
std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
|
||||
const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
|
||||
bool& is_in_sec_cache) {
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle;
|
||||
is_in_sec_cache = false;
|
||||
Cache::Handle* lru_handle = cache_->Lookup(key);
|
||||
if (lru_handle == nullptr) {
|
||||
return handle;
|
||||
}
|
||||
|
||||
CacheAllocationPtr* ptr =
|
||||
reinterpret_cast<CacheAllocationPtr*>(cache_->Value(lru_handle));
|
||||
void* value = nullptr;
|
||||
size_t charge = 0;
|
||||
Status s;
|
||||
|
||||
if (cache_options_.compression_type == kNoCompression) {
|
||||
s = create_cb(ptr->get(), cache_->GetCharge(lru_handle), &value, &charge);
|
||||
} else {
|
||||
UncompressionContext uncompression_context(cache_options_.compression_type);
|
||||
UncompressionInfo uncompression_info(uncompression_context,
|
||||
UncompressionDict::GetEmptyDict(),
|
||||
cache_options_.compression_type);
|
||||
|
||||
size_t uncompressed_size = 0;
|
||||
CacheAllocationPtr uncompressed;
|
||||
uncompressed = UncompressData(
|
||||
uncompression_info, (char*)ptr->get(), cache_->GetCharge(lru_handle),
|
||||
&uncompressed_size, cache_options_.compress_format_version,
|
||||
cache_options_.memory_allocator.get());
|
||||
|
||||
if (!uncompressed) {
|
||||
cache_->Release(lru_handle, /* erase_if_last_ref */ true);
|
||||
return handle;
|
||||
}
|
||||
s = create_cb(uncompressed.get(), uncompressed_size, &value, &charge);
|
||||
}
|
||||
|
||||
if (!s.ok()) {
|
||||
cache_->Release(lru_handle, /* erase_if_last_ref */ true);
|
||||
return handle;
|
||||
}
|
||||
|
||||
cache_->Release(lru_handle, /* erase_if_last_ref */ true);
|
||||
handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));
|
||||
|
||||
return handle;
|
||||
}
|
||||
|
||||
Status CompressedSecondaryCache::Insert(const Slice& key, void* value,
|
||||
const Cache::CacheItemHelper* helper) {
|
||||
size_t size = (*helper->size_cb)(value);
|
||||
CacheAllocationPtr ptr =
|
||||
AllocateBlock(size, cache_options_.memory_allocator.get());
|
||||
|
||||
Status s = (*helper->saveto_cb)(value, 0, size, ptr.get());
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
Slice val(ptr.get(), size);
|
||||
|
||||
std::string compressed_val;
|
||||
if (cache_options_.compression_type != kNoCompression) {
|
||||
CompressionOptions compression_opts;
|
||||
CompressionContext compression_context(cache_options_.compression_type);
|
||||
uint64_t sample_for_compression = 0;
|
||||
CompressionInfo compression_info(
|
||||
compression_opts, compression_context, CompressionDict::GetEmptyDict(),
|
||||
cache_options_.compression_type, sample_for_compression);
|
||||
|
||||
bool success =
|
||||
CompressData(val, compression_info,
|
||||
cache_options_.compress_format_version, &compressed_val);
|
||||
|
||||
if (!success) {
|
||||
return Status::Corruption("Error compressing value.");
|
||||
}
|
||||
|
||||
val = Slice(compressed_val);
|
||||
size = compressed_val.size();
|
||||
ptr = AllocateBlock(size, cache_options_.memory_allocator.get());
|
||||
memcpy(ptr.get(), compressed_val.data(), size);
|
||||
}
|
||||
|
||||
CacheAllocationPtr* buf = new CacheAllocationPtr(std::move(ptr));
|
||||
|
||||
return cache_->Insert(key, buf, size, DeletionCallback);
|
||||
}
|
||||
|
||||
void CompressedSecondaryCache::Erase(const Slice& key) { cache_->Erase(key); }
|
||||
|
||||
std::string CompressedSecondaryCache::GetPrintableOptions() const {
|
||||
std::string ret;
|
||||
ret.reserve(20000);
|
||||
const int kBufferSize = 200;
|
||||
char buffer[kBufferSize];
|
||||
ret.append(cache_->GetPrintableOptions());
|
||||
snprintf(buffer, kBufferSize, " compression_type : %s\n",
|
||||
CompressionTypeToString(cache_options_.compression_type).c_str());
|
||||
ret.append(buffer);
|
||||
snprintf(buffer, kBufferSize, " compression_type : %d\n",
|
||||
cache_options_.compress_format_version);
|
||||
ret.append(buffer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
CompressionType compression_type, uint32_t compress_format_version) {
|
||||
return std::make_shared<CompressedSecondaryCache>(
|
||||
capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
|
||||
memory_allocator, use_adaptive_mutex, metadata_charge_policy,
|
||||
compression_type, compress_format_version);
|
||||
}
|
||||
|
||||
std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
|
||||
const CompressedSecondaryCacheOptions& opts) {
|
||||
// The secondary_cache is disabled for this LRUCache instance.
|
||||
assert(opts.secondary_cache == nullptr);
|
||||
return NewCompressedSecondaryCache(
|
||||
opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
|
||||
opts.high_pri_pool_ratio, opts.memory_allocator, opts.use_adaptive_mutex,
|
||||
opts.metadata_charge_policy, opts.compression_type,
|
||||
opts.compress_format_version);
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
86
cache/compressed_secondary_cache.h
vendored
86
cache/compressed_secondary_cache.h
vendored
@ -1,86 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "cache/lru_cache.h"
|
||||
#include "memory/memory_allocator.h"
|
||||
#include "rocksdb/secondary_cache.h"
|
||||
#include "rocksdb/slice.h"
|
||||
#include "rocksdb/status.h"
|
||||
#include "util/compression.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
|
||||
public:
|
||||
CompressedSecondaryCacheResultHandle(void* value, size_t size)
|
||||
: value_(value), size_(size) {}
|
||||
virtual ~CompressedSecondaryCacheResultHandle() override = default;
|
||||
|
||||
CompressedSecondaryCacheResultHandle(
|
||||
const CompressedSecondaryCacheResultHandle&) = delete;
|
||||
CompressedSecondaryCacheResultHandle& operator=(
|
||||
const CompressedSecondaryCacheResultHandle&) = delete;
|
||||
|
||||
bool IsReady() override { return true; }
|
||||
|
||||
void Wait() override {}
|
||||
|
||||
void* Value() override { return value_; }
|
||||
|
||||
size_t Size() override { return size_; }
|
||||
|
||||
private:
|
||||
void* value_;
|
||||
size_t size_;
|
||||
};
|
||||
|
||||
// The CompressedSecondaryCache is a concrete implementation of
|
||||
// rocksdb::SecondaryCache.
|
||||
//
|
||||
// Users can also cast a pointer to it and call methods on
|
||||
// it directly, especially custom methods that may be added
|
||||
// in the future. For example -
|
||||
// std::unique_ptr<rocksdb::SecondaryCache> cache =
|
||||
// NewCompressedSecondaryCache(opts);
|
||||
// static_cast<CompressedSecondaryCache*>(cache.get())->Erase(key);
|
||||
|
||||
class CompressedSecondaryCache : public SecondaryCache {
|
||||
public:
|
||||
CompressedSecondaryCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
|
||||
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy =
|
||||
kDontChargeCacheMetadata,
|
||||
CompressionType compression_type = CompressionType::kLZ4Compression,
|
||||
uint32_t compress_format_version = 2);
|
||||
virtual ~CompressedSecondaryCache() override;
|
||||
|
||||
const char* Name() const override { return "CompressedSecondaryCache"; }
|
||||
|
||||
Status Insert(const Slice& key, void* value,
|
||||
const Cache::CacheItemHelper* helper) override;
|
||||
|
||||
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
|
||||
const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
|
||||
bool& is_in_sec_cache) override;
|
||||
|
||||
void Erase(const Slice& key) override;
|
||||
|
||||
void WaitAll(std::vector<SecondaryCacheResultHandle*> /*handles*/) override {}
|
||||
|
||||
std::string GetPrintableOptions() const override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Cache> cache_;
|
||||
CompressedSecondaryCacheOptions cache_options_;
|
||||
};
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
607
cache/compressed_secondary_cache_test.cc
vendored
607
cache/compressed_secondary_cache_test.cc
vendored
@ -1,607 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "cache/compressed_secondary_cache.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
|
||||
#include "memory/jemalloc_nodump_allocator.h"
|
||||
#include "memory/memory_allocator.h"
|
||||
#include "test_util/testharness.h"
|
||||
#include "test_util/testutil.h"
|
||||
#include "util/compression.h"
|
||||
#include "util/random.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class CompressedSecondaryCacheTest : public testing::Test {
|
||||
public:
|
||||
CompressedSecondaryCacheTest() : fail_create_(false) {}
|
||||
~CompressedSecondaryCacheTest() {}
|
||||
|
||||
protected:
|
||||
class TestItem {
|
||||
public:
|
||||
TestItem(const char* buf, size_t size) : buf_(new char[size]), size_(size) {
|
||||
memcpy(buf_.get(), buf, size);
|
||||
}
|
||||
~TestItem() {}
|
||||
|
||||
char* Buf() { return buf_.get(); }
|
||||
size_t Size() { return size_; }
|
||||
|
||||
private:
|
||||
std::unique_ptr<char[]> buf_;
|
||||
size_t size_;
|
||||
};
|
||||
|
||||
static size_t SizeCallback(void* obj) {
|
||||
return reinterpret_cast<TestItem*>(obj)->Size();
|
||||
}
|
||||
|
||||
static Status SaveToCallback(void* from_obj, size_t from_offset,
|
||||
size_t length, void* out) {
|
||||
TestItem* item = reinterpret_cast<TestItem*>(from_obj);
|
||||
const char* buf = item->Buf();
|
||||
EXPECT_EQ(length, item->Size());
|
||||
EXPECT_EQ(from_offset, 0);
|
||||
memcpy(out, buf, length);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
static void DeletionCallback(const Slice& /*key*/, void* obj) {
|
||||
delete reinterpret_cast<TestItem*>(obj);
|
||||
obj = nullptr;
|
||||
}
|
||||
|
||||
static Cache::CacheItemHelper helper_;
|
||||
|
||||
static Status SaveToCallbackFail(void* /*obj*/, size_t /*offset*/,
|
||||
size_t /*size*/, void* /*out*/) {
|
||||
return Status::NotSupported();
|
||||
}
|
||||
|
||||
static Cache::CacheItemHelper helper_fail_;
|
||||
|
||||
Cache::CreateCallback test_item_creator = [&](const void* buf, size_t size,
|
||||
void** out_obj,
|
||||
size_t* charge) -> Status {
|
||||
if (fail_create_) {
|
||||
return Status::NotSupported();
|
||||
}
|
||||
*out_obj = reinterpret_cast<void*>(new TestItem((char*)buf, size));
|
||||
*charge = size;
|
||||
return Status::OK();
|
||||
};
|
||||
|
||||
void SetFailCreate(bool fail) { fail_create_ = fail; }
|
||||
|
||||
void BasicTest(bool sec_cache_is_compressed, bool use_jemalloc) {
|
||||
CompressedSecondaryCacheOptions opts;
|
||||
opts.capacity = 2048;
|
||||
opts.num_shard_bits = 0;
|
||||
opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
if (use_jemalloc) {
|
||||
JemallocAllocatorOptions jopts;
|
||||
std::shared_ptr<MemoryAllocator> allocator;
|
||||
std::string msg;
|
||||
if (JemallocNodumpAllocator::IsSupported(&msg)) {
|
||||
Status s = NewJemallocNodumpAllocator(jopts, &allocator);
|
||||
if (s.ok()) {
|
||||
opts.memory_allocator = allocator;
|
||||
}
|
||||
} else {
|
||||
ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
|
||||
}
|
||||
}
|
||||
std::shared_ptr<SecondaryCache> sec_cache =
|
||||
NewCompressedSecondaryCache(opts);
|
||||
|
||||
bool is_in_sec_cache{true};
|
||||
// Lookup an non-existent key.
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle0 =
|
||||
sec_cache->Lookup("k0", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_EQ(handle0, nullptr);
|
||||
|
||||
Random rnd(301);
|
||||
// Insert and Lookup the first item.
|
||||
std::string str1;
|
||||
test::CompressibleString(&rnd, 0.25, 1000, &str1);
|
||||
TestItem item1(str1.data(), str1.length());
|
||||
ASSERT_OK(sec_cache->Insert("k1", &item1,
|
||||
&CompressedSecondaryCacheTest::helper_));
|
||||
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle1 =
|
||||
sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_NE(handle1, nullptr);
|
||||
ASSERT_FALSE(is_in_sec_cache);
|
||||
|
||||
std::unique_ptr<TestItem> val1 =
|
||||
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle1->Value()));
|
||||
ASSERT_NE(val1, nullptr);
|
||||
ASSERT_EQ(memcmp(val1->Buf(), item1.Buf(), item1.Size()), 0);
|
||||
|
||||
// Lookup the first item again.
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
|
||||
sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_EQ(handle1_1, nullptr);
|
||||
|
||||
// Insert and Lookup the second item.
|
||||
std::string str2;
|
||||
test::CompressibleString(&rnd, 0.5, 1000, &str2);
|
||||
TestItem item2(str2.data(), str2.length());
|
||||
ASSERT_OK(sec_cache->Insert("k2", &item2,
|
||||
&CompressedSecondaryCacheTest::helper_));
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle2 =
|
||||
sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_NE(handle2, nullptr);
|
||||
std::unique_ptr<TestItem> val2 =
|
||||
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
|
||||
ASSERT_NE(val2, nullptr);
|
||||
ASSERT_EQ(memcmp(val2->Buf(), item2.Buf(), item2.Size()), 0);
|
||||
|
||||
std::vector<SecondaryCacheResultHandle*> handles = {handle1.get(),
|
||||
handle2.get()};
|
||||
sec_cache->WaitAll(handles);
|
||||
|
||||
sec_cache.reset();
|
||||
}
|
||||
|
||||
void FailsTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 1100;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
std::shared_ptr<SecondaryCache> sec_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
|
||||
// Insert and Lookup the first item.
|
||||
Random rnd(301);
|
||||
std::string str1(rnd.RandomString(1000));
|
||||
TestItem item1(str1.data(), str1.length());
|
||||
ASSERT_OK(sec_cache->Insert("k1", &item1,
|
||||
&CompressedSecondaryCacheTest::helper_));
|
||||
|
||||
// Insert and Lookup the second item.
|
||||
std::string str2(rnd.RandomString(200));
|
||||
TestItem item2(str2.data(), str2.length());
|
||||
// k1 is evicted.
|
||||
ASSERT_OK(sec_cache->Insert("k2", &item2,
|
||||
&CompressedSecondaryCacheTest::helper_));
|
||||
bool is_in_sec_cache{false};
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
|
||||
sec_cache->Lookup("k1", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_EQ(handle1_1, nullptr);
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle2 =
|
||||
sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_NE(handle2, nullptr);
|
||||
std::unique_ptr<TestItem> val2 =
|
||||
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
|
||||
ASSERT_NE(val2, nullptr);
|
||||
ASSERT_EQ(memcmp(val2->Buf(), item2.Buf(), item2.Size()), 0);
|
||||
|
||||
// Create Fails.
|
||||
SetFailCreate(true);
|
||||
std::unique_ptr<SecondaryCacheResultHandle> handle2_1 =
|
||||
sec_cache->Lookup("k2", test_item_creator, true, is_in_sec_cache);
|
||||
ASSERT_EQ(handle2_1, nullptr);
|
||||
|
||||
// Save Fails.
|
||||
std::string str3 = rnd.RandomString(10);
|
||||
TestItem item3(str3.data(), str3.length());
|
||||
ASSERT_NOK(sec_cache->Insert("k3", &item3,
|
||||
&CompressedSecondaryCacheTest::helper_fail_));
|
||||
|
||||
sec_cache.reset();
|
||||
}
|
||||
|
||||
void BasicIntegrationTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 2300;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
std::shared_ptr<SecondaryCache> secondary_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
LRUCacheOptions lru_cache_opts(1024, 0, false, 0.5, nullptr,
|
||||
kDefaultToAdaptiveMutex,
|
||||
kDontChargeCacheMetadata);
|
||||
lru_cache_opts.secondary_cache = secondary_cache;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(lru_cache_opts);
|
||||
std::shared_ptr<Statistics> stats = CreateDBStatistics();
|
||||
|
||||
Random rnd(301);
|
||||
|
||||
std::string str1 = rnd.RandomString(1010);
|
||||
std::string str1_clone{str1};
|
||||
TestItem* item1 = new TestItem(str1.data(), str1.length());
|
||||
ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
|
||||
str1.length()));
|
||||
|
||||
std::string str2 = rnd.RandomString(1020);
|
||||
TestItem* item2 = new TestItem(str2.data(), str2.length());
|
||||
// After Insert, lru cache contains k2 and secondary cache contains k1.
|
||||
ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
|
||||
str2.length()));
|
||||
|
||||
std::string str3 = rnd.RandomString(1020);
|
||||
TestItem* item3 = new TestItem(str3.data(), str3.length());
|
||||
// After Insert, lru cache contains k3 and secondary cache contains k1 and
|
||||
// k2
|
||||
ASSERT_OK(cache->Insert("k3", item3, &CompressedSecondaryCacheTest::helper_,
|
||||
str3.length()));
|
||||
|
||||
Cache::Handle* handle;
|
||||
handle = cache->Lookup("k3", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true,
|
||||
stats.get());
|
||||
ASSERT_NE(handle, nullptr);
|
||||
TestItem* val3 = static_cast<TestItem*>(cache->Value(handle));
|
||||
ASSERT_NE(val3, nullptr);
|
||||
ASSERT_EQ(memcmp(val3->Buf(), item3->Buf(), item3->Size()), 0);
|
||||
cache->Release(handle);
|
||||
|
||||
// Lookup an non-existent key.
|
||||
handle = cache->Lookup("k0", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true,
|
||||
stats.get());
|
||||
ASSERT_EQ(handle, nullptr);
|
||||
|
||||
// This Lookup should promote k1 and erase k1 from the secondary cache,
|
||||
// then k3 is demoted. So k2 and k3 are in the secondary cache.
|
||||
handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true,
|
||||
stats.get());
|
||||
|
||||
ASSERT_NE(handle, nullptr);
|
||||
TestItem* val1_1 = static_cast<TestItem*>(cache->Value(handle));
|
||||
ASSERT_NE(val1_1, nullptr);
|
||||
ASSERT_EQ(memcmp(val1_1->Buf(), str1_clone.data(), str1_clone.size()), 0);
|
||||
cache->Release(handle);
|
||||
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true,
|
||||
stats.get());
|
||||
ASSERT_NE(handle, nullptr);
|
||||
cache->Release(handle);
|
||||
|
||||
cache.reset();
|
||||
secondary_cache.reset();
|
||||
}
|
||||
|
||||
void BasicIntegrationFailTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 2048;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
std::shared_ptr<SecondaryCache> secondary_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
|
||||
LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
|
||||
kDontChargeCacheMetadata);
|
||||
opts.secondary_cache = secondary_cache;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(opts);
|
||||
|
||||
Random rnd(301);
|
||||
std::string str1 = rnd.RandomString(1020);
|
||||
auto item1 =
|
||||
std::unique_ptr<TestItem>(new TestItem(str1.data(), str1.length()));
|
||||
ASSERT_NOK(cache->Insert("k1", item1.get(), nullptr, str1.length()));
|
||||
ASSERT_OK(cache->Insert("k1", item1.get(),
|
||||
&CompressedSecondaryCacheTest::helper_,
|
||||
str1.length()));
|
||||
item1.release(); // Appease clang-analyze "potential memory leak"
|
||||
|
||||
Cache::Handle* handle;
|
||||
handle = cache->Lookup("k2", nullptr, test_item_creator,
|
||||
Cache::Priority::LOW, true);
|
||||
ASSERT_EQ(handle, nullptr);
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, false);
|
||||
ASSERT_EQ(handle, nullptr);
|
||||
|
||||
cache.reset();
|
||||
secondary_cache.reset();
|
||||
}
|
||||
|
||||
void IntegrationSaveFailTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 2048;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
|
||||
std::shared_ptr<SecondaryCache> secondary_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
|
||||
LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
|
||||
kDontChargeCacheMetadata);
|
||||
opts.secondary_cache = secondary_cache;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(opts);
|
||||
|
||||
Random rnd(301);
|
||||
std::string str1 = rnd.RandomString(1020);
|
||||
TestItem* item1 = new TestItem(str1.data(), str1.length());
|
||||
ASSERT_OK(cache->Insert("k1", item1,
|
||||
&CompressedSecondaryCacheTest::helper_fail_,
|
||||
str1.length()));
|
||||
std::string str2 = rnd.RandomString(1020);
|
||||
TestItem* item2 = new TestItem(str2.data(), str2.length());
|
||||
// k1 should be demoted to the secondary cache.
|
||||
ASSERT_OK(cache->Insert("k2", item2,
|
||||
&CompressedSecondaryCacheTest::helper_fail_,
|
||||
str2.length()));
|
||||
|
||||
Cache::Handle* handle;
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_fail_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle, nullptr);
|
||||
cache->Release(handle);
|
||||
// This lookup should fail, since k1 demotion would have failed
|
||||
handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_fail_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_EQ(handle, nullptr);
|
||||
// Since k1 didn't get promoted, k2 should still be in cache
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_fail_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle, nullptr);
|
||||
cache->Release(handle);
|
||||
|
||||
cache.reset();
|
||||
secondary_cache.reset();
|
||||
}
|
||||
|
||||
void IntegrationCreateFailTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 2048;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
|
||||
std::shared_ptr<SecondaryCache> secondary_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
|
||||
LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
|
||||
kDontChargeCacheMetadata);
|
||||
opts.secondary_cache = secondary_cache;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(opts);
|
||||
|
||||
Random rnd(301);
|
||||
std::string str1 = rnd.RandomString(1020);
|
||||
TestItem* item1 = new TestItem(str1.data(), str1.length());
|
||||
ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
|
||||
str1.length()));
|
||||
|
||||
std::string str2 = rnd.RandomString(1020);
|
||||
TestItem* item2 = new TestItem(str2.data(), str2.length());
|
||||
// k1 should be demoted to the secondary cache.
|
||||
ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
|
||||
str2.length()));
|
||||
|
||||
Cache::Handle* handle;
|
||||
SetFailCreate(true);
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle, nullptr);
|
||||
cache->Release(handle);
|
||||
// This lookup should fail, since k1 creation would have failed
|
||||
handle = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_EQ(handle, nullptr);
|
||||
// Since k1 didn't get promoted, k2 should still be in cache
|
||||
handle = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle, nullptr);
|
||||
cache->Release(handle);
|
||||
|
||||
cache.reset();
|
||||
secondary_cache.reset();
|
||||
}
|
||||
|
||||
void IntegrationFullCapacityTest(bool sec_cache_is_compressed) {
|
||||
CompressedSecondaryCacheOptions secondary_cache_opts;
|
||||
|
||||
if (sec_cache_is_compressed) {
|
||||
if (!LZ4_Supported()) {
|
||||
ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
} else {
|
||||
secondary_cache_opts.compression_type = CompressionType::kNoCompression;
|
||||
}
|
||||
|
||||
secondary_cache_opts.capacity = 2048;
|
||||
secondary_cache_opts.num_shard_bits = 0;
|
||||
secondary_cache_opts.metadata_charge_policy = kDontChargeCacheMetadata;
|
||||
|
||||
std::shared_ptr<SecondaryCache> secondary_cache =
|
||||
NewCompressedSecondaryCache(secondary_cache_opts);
|
||||
|
||||
LRUCacheOptions opts(1024, 0, /*_strict_capacity_limit=*/true, 0.5, nullptr,
|
||||
kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
|
||||
opts.secondary_cache = secondary_cache;
|
||||
std::shared_ptr<Cache> cache = NewLRUCache(opts);
|
||||
|
||||
Random rnd(301);
|
||||
std::string str1 = rnd.RandomString(1020);
|
||||
TestItem* item1 = new TestItem(str1.data(), str1.length());
|
||||
ASSERT_OK(cache->Insert("k1", item1, &CompressedSecondaryCacheTest::helper_,
|
||||
str1.length()));
|
||||
std::string str2 = rnd.RandomString(1020);
|
||||
TestItem* item2 = new TestItem(str2.data(), str2.length());
|
||||
// k1 should be demoted to the secondary cache.
|
||||
ASSERT_OK(cache->Insert("k2", item2, &CompressedSecondaryCacheTest::helper_,
|
||||
str2.length()));
|
||||
|
||||
Cache::Handle* handle2;
|
||||
handle2 = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle2, nullptr);
|
||||
cache->Release(handle2);
|
||||
// k1 promotion should fail due to the block cache being at capacity,
|
||||
// but the lookup should still succeed
|
||||
Cache::Handle* handle1;
|
||||
handle1 = cache->Lookup("k1", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle1, nullptr);
|
||||
cache->Release(handle1);
|
||||
|
||||
// Since k1 didn't get inserted, k2 should still be in cache
|
||||
handle2 = cache->Lookup("k2", &CompressedSecondaryCacheTest::helper_,
|
||||
test_item_creator, Cache::Priority::LOW, true);
|
||||
ASSERT_NE(handle2, nullptr);
|
||||
cache->Release(handle2);
|
||||
|
||||
cache.reset();
|
||||
secondary_cache.reset();
|
||||
}
|
||||
|
||||
private:
|
||||
bool fail_create_;
|
||||
};
|
||||
|
||||
Cache::CacheItemHelper CompressedSecondaryCacheTest::helper_(
|
||||
CompressedSecondaryCacheTest::SizeCallback,
|
||||
CompressedSecondaryCacheTest::SaveToCallback,
|
||||
CompressedSecondaryCacheTest::DeletionCallback);
|
||||
|
||||
Cache::CacheItemHelper CompressedSecondaryCacheTest::helper_fail_(
|
||||
CompressedSecondaryCacheTest::SizeCallback,
|
||||
CompressedSecondaryCacheTest::SaveToCallbackFail,
|
||||
CompressedSecondaryCacheTest::DeletionCallback);
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, BasicTestWithNoCompression) {
|
||||
BasicTest(false, false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
BasicTestWithMemoryAllocatorAndNoCompression) {
|
||||
BasicTest(false, true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, BasicTestWithCompression) {
|
||||
BasicTest(true, false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
BasicTestWithMemoryAllocatorAndCompression) {
|
||||
BasicTest(true, true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, FailsTestWithNoCompression) {
|
||||
FailsTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, FailsTestWithCompression) {
|
||||
FailsTest(true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, BasicIntegrationTestWithNoCompression) {
|
||||
BasicIntegrationTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, BasicIntegrationTestWithCompression) {
|
||||
BasicIntegrationTest(true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
BasicIntegrationFailTestWithNoCompression) {
|
||||
BasicIntegrationFailTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, BasicIntegrationFailTestWithCompression) {
|
||||
BasicIntegrationFailTest(true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, IntegrationSaveFailTestWithNoCompression) {
|
||||
IntegrationSaveFailTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, IntegrationSaveFailTestWithCompression) {
|
||||
IntegrationSaveFailTest(true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
IntegrationCreateFailTestWithNoCompression) {
|
||||
IntegrationCreateFailTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest, IntegrationCreateFailTestWithCompression) {
|
||||
IntegrationCreateFailTest(true);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
IntegrationFullCapacityTestWithNoCompression) {
|
||||
IntegrationFullCapacityTest(false);
|
||||
}
|
||||
|
||||
TEST_F(CompressedSecondaryCacheTest,
|
||||
IntegrationFullCapacityTestWithCompression) {
|
||||
IntegrationFullCapacityTest(true);
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
511
cache/fast_lru_cache.cc
vendored
511
cache/fast_lru_cache.cc
vendored
@ -1,511 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include "cache/fast_lru_cache.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
|
||||
#include "monitoring/perf_context_imp.h"
|
||||
#include "monitoring/statistics.h"
|
||||
#include "port/lang.h"
|
||||
#include "util/mutexlock.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
namespace fast_lru_cache {
|
||||
|
||||
LRUHandleTable::LRUHandleTable(int max_upper_hash_bits)
|
||||
: length_bits_(/* historical starting size*/ 4),
|
||||
list_(new LRUHandle* [size_t{1} << length_bits_] {}),
|
||||
elems_(0),
|
||||
max_length_bits_(max_upper_hash_bits) {}
|
||||
|
||||
LRUHandleTable::~LRUHandleTable() {
|
||||
ApplyToEntriesRange(
|
||||
[](LRUHandle* h) {
|
||||
if (!h->HasRefs()) {
|
||||
h->Free();
|
||||
}
|
||||
},
|
||||
0, uint32_t{1} << length_bits_);
|
||||
}
|
||||
|
||||
LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
|
||||
return *FindPointer(key, hash);
|
||||
}
|
||||
|
||||
LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
|
||||
LRUHandle** ptr = FindPointer(h->key(), h->hash);
|
||||
LRUHandle* old = *ptr;
|
||||
h->next_hash = (old == nullptr ? nullptr : old->next_hash);
|
||||
*ptr = h;
|
||||
if (old == nullptr) {
|
||||
++elems_;
|
||||
if ((elems_ >> length_bits_) > 0) { // elems_ >= length
|
||||
// Since each cache entry is fairly large, we aim for a small
|
||||
// average linked list length (<= 1).
|
||||
Resize();
|
||||
}
|
||||
}
|
||||
return old;
|
||||
}
|
||||
|
||||
LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
|
||||
LRUHandle** ptr = FindPointer(key, hash);
|
||||
LRUHandle* result = *ptr;
|
||||
if (result != nullptr) {
|
||||
*ptr = result->next_hash;
|
||||
--elems_;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
|
||||
LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
|
||||
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
|
||||
ptr = &(*ptr)->next_hash;
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void LRUHandleTable::Resize() {
|
||||
if (length_bits_ >= max_length_bits_) {
|
||||
// Due to reaching limit of hash information, if we made the table bigger,
|
||||
// we would allocate more addresses but only the same number would be used.
|
||||
return;
|
||||
}
|
||||
if (length_bits_ >= 31) {
|
||||
// Avoid undefined behavior shifting uint32_t by 32.
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t old_length = uint32_t{1} << length_bits_;
|
||||
int new_length_bits = length_bits_ + 1;
|
||||
std::unique_ptr<LRUHandle* []> new_list {
|
||||
new LRUHandle* [size_t{1} << new_length_bits] {}
|
||||
};
|
||||
uint32_t count = 0;
|
||||
for (uint32_t i = 0; i < old_length; i++) {
|
||||
LRUHandle* h = list_[i];
|
||||
while (h != nullptr) {
|
||||
LRUHandle* next = h->next_hash;
|
||||
uint32_t hash = h->hash;
|
||||
LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
|
||||
h->next_hash = *ptr;
|
||||
*ptr = h;
|
||||
h = next;
|
||||
count++;
|
||||
}
|
||||
}
|
||||
assert(elems_ == count);
|
||||
list_ = std::move(new_list);
|
||||
length_bits_ = new_length_bits;
|
||||
}
|
||||
|
||||
LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
int max_upper_hash_bits)
|
||||
: capacity_(0),
|
||||
strict_capacity_limit_(strict_capacity_limit),
|
||||
table_(max_upper_hash_bits),
|
||||
usage_(0),
|
||||
lru_usage_(0) {
|
||||
set_metadata_charge_policy(metadata_charge_policy);
|
||||
// Make empty circular linked list.
|
||||
lru_.next = &lru_;
|
||||
lru_.prev = &lru_;
|
||||
lru_low_pri_ = &lru_;
|
||||
SetCapacity(capacity);
|
||||
}
|
||||
|
||||
void LRUCacheShard::EraseUnRefEntries() {
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
while (lru_.next != &lru_) {
|
||||
LRUHandle* old = lru_.next;
|
||||
// LRU list contains only elements which can be evicted.
|
||||
assert(old->InCache() && !old->HasRefs());
|
||||
LRU_Remove(old);
|
||||
table_.Remove(old->key(), old->hash);
|
||||
old->SetInCache(false);
|
||||
size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
last_reference_list.push_back(old);
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entries here outside of mutex for performance reasons.
|
||||
for (auto entry : last_reference_list) {
|
||||
entry->Free();
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) {
|
||||
// The state is essentially going to be the starting hash, which works
|
||||
// nicely even if we resize between calls because we use upper-most
|
||||
// hash bits for table indexes.
|
||||
MutexLock l(&mutex_);
|
||||
uint32_t length_bits = table_.GetLengthBits();
|
||||
uint32_t length = uint32_t{1} << length_bits;
|
||||
|
||||
assert(average_entries_per_lock > 0);
|
||||
// Assuming we are called with same average_entries_per_lock repeatedly,
|
||||
// this simplifies some logic (index_end will not overflow).
|
||||
assert(average_entries_per_lock < length || *state == 0);
|
||||
|
||||
uint32_t index_begin = *state >> (32 - length_bits);
|
||||
uint32_t index_end = index_begin + average_entries_per_lock;
|
||||
if (index_end >= length) {
|
||||
// Going to end
|
||||
index_end = length;
|
||||
*state = UINT32_MAX;
|
||||
} else {
|
||||
*state = index_end << (32 - length_bits);
|
||||
}
|
||||
|
||||
table_.ApplyToEntriesRange(
|
||||
[callback](LRUHandle* h) {
|
||||
callback(h->key(), h->value, h->charge, h->deleter);
|
||||
},
|
||||
index_begin, index_end);
|
||||
}
|
||||
|
||||
void LRUCacheShard::LRU_Remove(LRUHandle* e) {
|
||||
assert(e->next != nullptr);
|
||||
assert(e->prev != nullptr);
|
||||
e->next->prev = e->prev;
|
||||
e->prev->next = e->next;
|
||||
e->prev = e->next = nullptr;
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(lru_usage_ >= total_charge);
|
||||
lru_usage_ -= total_charge;
|
||||
}
|
||||
|
||||
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
|
||||
assert(e->next == nullptr);
|
||||
assert(e->prev == nullptr);
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
// Inset "e" to head of LRU list.
|
||||
e->next = &lru_;
|
||||
e->prev = lru_.prev;
|
||||
e->prev->next = e;
|
||||
e->next->prev = e;
|
||||
lru_usage_ += total_charge;
|
||||
}
|
||||
|
||||
void LRUCacheShard::EvictFromLRU(size_t charge,
|
||||
autovector<LRUHandle*>* deleted) {
|
||||
while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
|
||||
LRUHandle* old = lru_.next;
|
||||
// LRU list contains only elements which can be evicted.
|
||||
assert(old->InCache() && !old->HasRefs());
|
||||
LRU_Remove(old);
|
||||
table_.Remove(old->key(), old->hash);
|
||||
old->SetInCache(false);
|
||||
size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= old_total_charge);
|
||||
usage_ -= old_total_charge;
|
||||
deleted->push_back(old);
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::SetCapacity(size_t capacity) {
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
capacity_ = capacity;
|
||||
EvictFromLRU(0, &last_reference_list);
|
||||
}
|
||||
|
||||
// Free the entries here outside of mutex for performance reasons.
|
||||
for (auto entry : last_reference_list) {
|
||||
entry->Free();
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
|
||||
MutexLock l(&mutex_);
|
||||
strict_capacity_limit_ = strict_capacity_limit;
|
||||
}
|
||||
|
||||
Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
|
||||
bool free_handle_on_fail) {
|
||||
Status s = Status::OK();
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
|
||||
// Free the space following strict LRU policy until enough space
|
||||
// is freed or the lru list is empty.
|
||||
EvictFromLRU(total_charge, &last_reference_list);
|
||||
|
||||
if ((usage_ + total_charge) > capacity_ &&
|
||||
(strict_capacity_limit_ || handle == nullptr)) {
|
||||
e->SetInCache(false);
|
||||
if (handle == nullptr) {
|
||||
// Don't insert the entry but still return ok, as if the entry inserted
|
||||
// into cache and get evicted immediately.
|
||||
last_reference_list.push_back(e);
|
||||
} else {
|
||||
if (free_handle_on_fail) {
|
||||
delete[] reinterpret_cast<char*>(e);
|
||||
*handle = nullptr;
|
||||
}
|
||||
s = Status::Incomplete("Insert failed due to LRU cache being full.");
|
||||
}
|
||||
} else {
|
||||
// Insert into the cache. Note that the cache might get larger than its
|
||||
// capacity if not enough space was freed up.
|
||||
LRUHandle* old = table_.Insert(e);
|
||||
usage_ += total_charge;
|
||||
if (old != nullptr) {
|
||||
s = Status::OkOverwritten();
|
||||
assert(old->InCache());
|
||||
old->SetInCache(false);
|
||||
if (!old->HasRefs()) {
|
||||
// old is on LRU because it's in cache and its reference count is 0.
|
||||
LRU_Remove(old);
|
||||
size_t old_total_charge =
|
||||
old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= old_total_charge);
|
||||
usage_ -= old_total_charge;
|
||||
last_reference_list.push_back(old);
|
||||
}
|
||||
}
|
||||
if (handle == nullptr) {
|
||||
LRU_Insert(e);
|
||||
} else {
|
||||
// If caller already holds a ref, no need to take one here.
|
||||
if (!e->HasRefs()) {
|
||||
e->Ref();
|
||||
}
|
||||
*handle = reinterpret_cast<Cache::Handle*>(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entries here outside of mutex for performance reasons.
|
||||
for (auto entry : last_reference_list) {
|
||||
entry->Free();
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
|
||||
LRUHandle* e = nullptr;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
e = table_.Lookup(key, hash);
|
||||
if (e != nullptr) {
|
||||
assert(e->InCache());
|
||||
if (!e->HasRefs()) {
|
||||
// The entry is in LRU since it's in hash and has no external references
|
||||
LRU_Remove(e);
|
||||
}
|
||||
e->Ref();
|
||||
}
|
||||
}
|
||||
return reinterpret_cast<Cache::Handle*>(e);
|
||||
}
|
||||
|
||||
bool LRUCacheShard::Ref(Cache::Handle* h) {
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
|
||||
MutexLock l(&mutex_);
|
||||
// To create another reference - entry must be already externally referenced.
|
||||
assert(e->HasRefs());
|
||||
e->Ref();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
|
||||
if (handle == nullptr) {
|
||||
return false;
|
||||
}
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
|
||||
bool last_reference = false;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
last_reference = e->Unref();
|
||||
if (last_reference && e->InCache()) {
|
||||
// The item is still in cache, and nobody else holds a reference to it.
|
||||
if (usage_ > capacity_ || erase_if_last_ref) {
|
||||
// The LRU list must be empty since the cache is full.
|
||||
assert(lru_.next == &lru_ || erase_if_last_ref);
|
||||
// Take this opportunity and remove the item.
|
||||
table_.Remove(e->key(), e->hash);
|
||||
e->SetInCache(false);
|
||||
} else {
|
||||
// Put the item back on the LRU list, and don't free it.
|
||||
LRU_Insert(e);
|
||||
last_reference = false;
|
||||
}
|
||||
}
|
||||
// If it was the last reference, then decrement the cache usage.
|
||||
if (last_reference) {
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entry here outside of mutex for performance reasons.
|
||||
if (last_reference) {
|
||||
e->Free();
|
||||
}
|
||||
return last_reference;
|
||||
}
|
||||
|
||||
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
|
||||
size_t charge, Cache::DeleterFn deleter,
|
||||
Cache::Handle** handle,
|
||||
Cache::Priority /*priority*/) {
|
||||
// Allocate the memory here outside of the mutex.
|
||||
// If the cache is full, we'll have to release it.
|
||||
// It shouldn't happen very often though.
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(
|
||||
new char[sizeof(LRUHandle) - 1 + key.size()]);
|
||||
|
||||
e->value = value;
|
||||
e->flags = 0;
|
||||
e->deleter = deleter;
|
||||
e->charge = charge;
|
||||
e->key_length = key.size();
|
||||
e->hash = hash;
|
||||
e->refs = 0;
|
||||
e->next = e->prev = nullptr;
|
||||
e->SetInCache(true);
|
||||
memcpy(e->key_data, key.data(), key.size());
|
||||
|
||||
return InsertItem(e, handle, /* free_handle_on_fail */ true);
|
||||
}
|
||||
|
||||
void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
|
||||
LRUHandle* e;
|
||||
bool last_reference = false;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
e = table_.Remove(key, hash);
|
||||
if (e != nullptr) {
|
||||
assert(e->InCache());
|
||||
e->SetInCache(false);
|
||||
if (!e->HasRefs()) {
|
||||
// The entry is in LRU since it's in hash and has no external references
|
||||
LRU_Remove(e);
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
last_reference = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entry here outside of mutex for performance reasons.
|
||||
// last_reference will only be true if e != nullptr.
|
||||
if (last_reference) {
|
||||
e->Free();
|
||||
}
|
||||
}
|
||||
|
||||
size_t LRUCacheShard::GetUsage() const {
|
||||
MutexLock l(&mutex_);
|
||||
return usage_;
|
||||
}
|
||||
|
||||
size_t LRUCacheShard::GetPinnedUsage() const {
|
||||
MutexLock l(&mutex_);
|
||||
assert(usage_ >= lru_usage_);
|
||||
return usage_ - lru_usage_;
|
||||
}
|
||||
|
||||
std::string LRUCacheShard::GetPrintableOptions() const { return std::string{}; }
|
||||
|
||||
LRUCache::LRUCache(size_t capacity, int num_shard_bits,
|
||||
bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy)
|
||||
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
|
||||
num_shards_ = 1 << num_shard_bits;
|
||||
shards_ = reinterpret_cast<LRUCacheShard*>(
|
||||
port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
|
||||
size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
|
||||
for (int i = 0; i < num_shards_; i++) {
|
||||
new (&shards_[i])
|
||||
LRUCacheShard(per_shard, strict_capacity_limit, metadata_charge_policy,
|
||||
/* max_upper_hash_bits */ 32 - num_shard_bits);
|
||||
}
|
||||
}
|
||||
|
||||
LRUCache::~LRUCache() {
|
||||
if (shards_ != nullptr) {
|
||||
assert(num_shards_ > 0);
|
||||
for (int i = 0; i < num_shards_; i++) {
|
||||
shards_[i].~LRUCacheShard();
|
||||
}
|
||||
port::cacheline_aligned_free(shards_);
|
||||
}
|
||||
}
|
||||
|
||||
CacheShard* LRUCache::GetShard(uint32_t shard) {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
const CacheShard* LRUCache::GetShard(uint32_t shard) const {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
void* LRUCache::Value(Handle* handle) {
|
||||
return reinterpret_cast<const LRUHandle*>(handle)->value;
|
||||
}
|
||||
|
||||
size_t LRUCache::GetCharge(Handle* handle) const {
|
||||
return reinterpret_cast<const LRUHandle*>(handle)->charge;
|
||||
}
|
||||
|
||||
Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
|
||||
auto h = reinterpret_cast<const LRUHandle*>(handle);
|
||||
return h->deleter;
|
||||
}
|
||||
|
||||
uint32_t LRUCache::GetHash(Handle* handle) const {
|
||||
return reinterpret_cast<const LRUHandle*>(handle)->hash;
|
||||
}
|
||||
|
||||
void LRUCache::DisownData() {
|
||||
// Leak data only if that won't generate an ASAN/valgrind warning.
|
||||
if (!kMustFreeHeapAllocations) {
|
||||
shards_ = nullptr;
|
||||
num_shards_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace fast_lru_cache
|
||||
|
||||
std::shared_ptr<Cache> NewFastLRUCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
if (num_shard_bits >= 20) {
|
||||
return nullptr; // The cache cannot be sharded into too many fine pieces.
|
||||
}
|
||||
if (num_shard_bits < 0) {
|
||||
num_shard_bits = GetDefaultCacheShardBits(capacity);
|
||||
}
|
||||
return std::make_shared<fast_lru_cache::LRUCache>(
|
||||
capacity, num_shard_bits, strict_capacity_limit, metadata_charge_policy);
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
299
cache/fast_lru_cache.h
vendored
299
cache/fast_lru_cache.h
vendored
@ -1,299 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "cache/sharded_cache.h"
|
||||
#include "port/lang.h"
|
||||
#include "port/malloc.h"
|
||||
#include "port/port.h"
|
||||
#include "rocksdb/secondary_cache.h"
|
||||
#include "util/autovector.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace fast_lru_cache {
|
||||
|
||||
// An experimental (under development!) alternative to LRUCache
|
||||
|
||||
struct LRUHandle {
|
||||
void* value;
|
||||
Cache::DeleterFn deleter;
|
||||
LRUHandle* next_hash;
|
||||
LRUHandle* next;
|
||||
LRUHandle* prev;
|
||||
size_t charge; // TODO(opt): Only allow uint32_t?
|
||||
size_t key_length;
|
||||
// The hash of key(). Used for fast sharding and comparisons.
|
||||
uint32_t hash;
|
||||
// The number of external refs to this entry. The cache itself is not counted.
|
||||
uint32_t refs;
|
||||
|
||||
enum Flags : uint8_t {
|
||||
// Whether this entry is referenced by the hash table.
|
||||
IN_CACHE = (1 << 0),
|
||||
};
|
||||
uint8_t flags;
|
||||
|
||||
// Beginning of the key (MUST BE THE LAST FIELD IN THIS STRUCT!)
|
||||
char key_data[1];
|
||||
|
||||
Slice key() const { return Slice(key_data, key_length); }
|
||||
|
||||
// Increase the reference count by 1.
|
||||
void Ref() { refs++; }
|
||||
|
||||
// Just reduce the reference count by 1. Return true if it was last reference.
|
||||
bool Unref() {
|
||||
assert(refs > 0);
|
||||
refs--;
|
||||
return refs == 0;
|
||||
}
|
||||
|
||||
// Return true if there are external refs, false otherwise.
|
||||
bool HasRefs() const { return refs > 0; }
|
||||
|
||||
bool InCache() const { return flags & IN_CACHE; }
|
||||
|
||||
void SetInCache(bool in_cache) {
|
||||
if (in_cache) {
|
||||
flags |= IN_CACHE;
|
||||
} else {
|
||||
flags &= ~IN_CACHE;
|
||||
}
|
||||
}
|
||||
|
||||
void Free() {
|
||||
assert(refs == 0);
|
||||
if (deleter) {
|
||||
(*deleter)(key(), value);
|
||||
}
|
||||
delete[] reinterpret_cast<char*>(this);
|
||||
}
|
||||
|
||||
// Calculate the memory usage by metadata.
|
||||
inline size_t CalcTotalCharge(
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
size_t meta_charge = 0;
|
||||
if (metadata_charge_policy == kFullChargeCacheMetadata) {
|
||||
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
|
||||
meta_charge += malloc_usable_size(static_cast<void*>(this));
|
||||
#else
|
||||
// This is the size that is used when a new handle is created.
|
||||
meta_charge += sizeof(LRUHandle) - 1 + key_length;
|
||||
#endif
|
||||
}
|
||||
return charge + meta_charge;
|
||||
}
|
||||
};
|
||||
|
||||
// We provide our own simple hash table since it removes a whole bunch
|
||||
// of porting hacks and is also faster than some of the built-in hash
|
||||
// table implementations in some of the compiler/runtime combinations
|
||||
// we have tested. E.g., readrandom speeds up by ~5% over the g++
|
||||
// 4.4.3's builtin hashtable.
|
||||
class LRUHandleTable {
|
||||
public:
|
||||
// If the table uses more hash bits than `max_upper_hash_bits`,
|
||||
// it will eat into the bits used for sharding, which are constant
|
||||
// for a given LRUHandleTable.
|
||||
explicit LRUHandleTable(int max_upper_hash_bits);
|
||||
~LRUHandleTable();
|
||||
|
||||
LRUHandle* Lookup(const Slice& key, uint32_t hash);
|
||||
LRUHandle* Insert(LRUHandle* h);
|
||||
LRUHandle* Remove(const Slice& key, uint32_t hash);
|
||||
|
||||
template <typename T>
|
||||
void ApplyToEntriesRange(T func, uint32_t index_begin, uint32_t index_end) {
|
||||
for (uint32_t i = index_begin; i < index_end; i++) {
|
||||
LRUHandle* h = list_[i];
|
||||
while (h != nullptr) {
|
||||
auto n = h->next_hash;
|
||||
assert(h->InCache());
|
||||
func(h);
|
||||
h = n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int GetLengthBits() const { return length_bits_; }
|
||||
|
||||
private:
|
||||
// Return a pointer to slot that points to a cache entry that
|
||||
// matches key/hash. If there is no such cache entry, return a
|
||||
// pointer to the trailing slot in the corresponding linked list.
|
||||
LRUHandle** FindPointer(const Slice& key, uint32_t hash);
|
||||
|
||||
void Resize();
|
||||
|
||||
// Number of hash bits (upper because lower bits used for sharding)
|
||||
// used for table index. Length == 1 << length_bits_
|
||||
int length_bits_;
|
||||
|
||||
// The table consists of an array of buckets where each bucket is
|
||||
// a linked list of cache entries that hash into the bucket.
|
||||
std::unique_ptr<LRUHandle*[]> list_;
|
||||
|
||||
// Number of elements currently in the table.
|
||||
uint32_t elems_;
|
||||
|
||||
// Set from max_upper_hash_bits (see constructor).
|
||||
const int max_length_bits_;
|
||||
};
|
||||
|
||||
// A single shard of sharded cache.
|
||||
class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
public:
|
||||
LRUCacheShard(size_t capacity, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
int max_upper_hash_bits);
|
||||
~LRUCacheShard() override = default;
|
||||
|
||||
// Separate from constructor so caller can easily make an array of LRUCache
|
||||
// if current usage is more than new capacity, the function will attempt to
|
||||
// free the needed space.
|
||||
void SetCapacity(size_t capacity) override;
|
||||
|
||||
// Set the flag to reject insertion if cache if full.
|
||||
void SetStrictCapacityLimit(bool strict_capacity_limit) override;
|
||||
|
||||
// Like Cache methods, but with an extra "hash" parameter.
|
||||
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
|
||||
Cache::DeleterFn deleter, Cache::Handle** handle,
|
||||
Cache::Priority priority) override;
|
||||
|
||||
Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
const Cache::CacheItemHelper* helper, size_t charge,
|
||||
Cache::Handle** handle, Cache::Priority priority) override {
|
||||
return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
|
||||
}
|
||||
|
||||
Cache::Handle* Lookup(const Slice& key, uint32_t hash,
|
||||
const Cache::CacheItemHelper* /*helper*/,
|
||||
const Cache::CreateCallback& /*create_cb*/,
|
||||
Cache::Priority /*priority*/, bool /*wait*/,
|
||||
Statistics* /*stats*/) override {
|
||||
return Lookup(key, hash);
|
||||
}
|
||||
Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
|
||||
|
||||
bool Release(Cache::Handle* handle, bool /*useful*/,
|
||||
bool erase_if_last_ref) override {
|
||||
return Release(handle, erase_if_last_ref);
|
||||
}
|
||||
bool IsReady(Cache::Handle* /*handle*/) override { return true; }
|
||||
void Wait(Cache::Handle* /*handle*/) override {}
|
||||
|
||||
bool Ref(Cache::Handle* handle) override;
|
||||
bool Release(Cache::Handle* handle, bool erase_if_last_ref = false) override;
|
||||
void Erase(const Slice& key, uint32_t hash) override;
|
||||
|
||||
size_t GetUsage() const override;
|
||||
size_t GetPinnedUsage() const override;
|
||||
|
||||
void ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) override;
|
||||
|
||||
void EraseUnRefEntries() override;
|
||||
|
||||
std::string GetPrintableOptions() const override;
|
||||
|
||||
private:
|
||||
friend class LRUCache;
|
||||
// Insert an item into the hash table and, if handle is null, insert into
|
||||
// the LRU list. Older items are evicted as necessary. If the cache is full
|
||||
// and free_handle_on_fail is true, the item is deleted and handle is set to
|
||||
// nullptr.
|
||||
Status InsertItem(LRUHandle* item, Cache::Handle** handle,
|
||||
bool free_handle_on_fail);
|
||||
|
||||
void LRU_Remove(LRUHandle* e);
|
||||
void LRU_Insert(LRUHandle* e);
|
||||
|
||||
// Free some space following strict LRU policy until enough space
|
||||
// to hold (usage_ + charge) is freed or the lru list is empty
|
||||
// This function is not thread safe - it needs to be executed while
|
||||
// holding the mutex_.
|
||||
void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);
|
||||
|
||||
// Initialized before use.
|
||||
size_t capacity_;
|
||||
|
||||
// Whether to reject insertion if cache reaches its full capacity.
|
||||
bool strict_capacity_limit_;
|
||||
|
||||
// Dummy head of LRU list.
|
||||
// lru.prev is newest entry, lru.next is oldest entry.
|
||||
// LRU contains items which can be evicted, ie reference only by cache
|
||||
LRUHandle lru_;
|
||||
|
||||
// Pointer to head of low-pri pool in LRU list.
|
||||
LRUHandle* lru_low_pri_;
|
||||
|
||||
// ------------^^^^^^^^^^^^^-----------
|
||||
// Not frequently modified data members
|
||||
// ------------------------------------
|
||||
//
|
||||
// We separate data members that are updated frequently from the ones that
|
||||
// are not frequently updated so that they don't share the same cache line
|
||||
// which will lead into false cache sharing
|
||||
//
|
||||
// ------------------------------------
|
||||
// Frequently modified data members
|
||||
// ------------vvvvvvvvvvvvv-----------
|
||||
LRUHandleTable table_;
|
||||
|
||||
// Memory size for entries residing in the cache.
|
||||
size_t usage_;
|
||||
|
||||
// Memory size for entries residing only in the LRU list.
|
||||
size_t lru_usage_;
|
||||
|
||||
// mutex_ protects the following state.
|
||||
// We don't count mutex_ as the cache's internal state so semantically we
|
||||
// don't mind mutex_ invoking the non-const actions.
|
||||
mutable port::Mutex mutex_;
|
||||
};
|
||||
|
||||
class LRUCache
|
||||
#ifdef NDEBUG
|
||||
final
|
||||
#endif
|
||||
: public ShardedCache {
|
||||
public:
|
||||
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
CacheMetadataChargePolicy metadata_charge_policy =
|
||||
kDontChargeCacheMetadata);
|
||||
~LRUCache() override;
|
||||
const char* Name() const override { return "LRUCache"; }
|
||||
CacheShard* GetShard(uint32_t shard) override;
|
||||
const CacheShard* GetShard(uint32_t shard) const override;
|
||||
void* Value(Handle* handle) override;
|
||||
size_t GetCharge(Handle* handle) const override;
|
||||
uint32_t GetHash(Handle* handle) const override;
|
||||
DeleterFn GetDeleter(Handle* handle) const override;
|
||||
void DisownData() override;
|
||||
|
||||
private:
|
||||
LRUCacheShard* shards_ = nullptr;
|
||||
int num_shards_ = 0;
|
||||
};
|
||||
} // namespace fast_lru_cache
|
||||
|
||||
std::shared_ptr<Cache> NewFastLRUCache(
|
||||
size_t capacity, int num_shard_bits = -1,
|
||||
bool strict_capacity_limit = false,
|
||||
CacheMetadataChargePolicy metadata_charge_policy =
|
||||
kDefaultCacheMetadataChargePolicy);
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
640
cache/lru_cache.cc
vendored
640
cache/lru_cache.cc
vendored
@ -7,34 +7,32 @@
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#ifndef __STDC_FORMAT_MACROS
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#endif
|
||||
|
||||
#include "cache/lru_cache.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string>
|
||||
|
||||
#include "monitoring/perf_context_imp.h"
|
||||
#include "monitoring/statistics.h"
|
||||
#include "port/lang.h"
|
||||
#include "util/mutexlock.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace lru_cache {
|
||||
namespace rocksdb {
|
||||
|
||||
LRUHandleTable::LRUHandleTable(int max_upper_hash_bits)
|
||||
: length_bits_(/* historical starting size*/ 4),
|
||||
list_(new LRUHandle* [size_t{1} << length_bits_] {}),
|
||||
elems_(0),
|
||||
max_length_bits_(max_upper_hash_bits) {}
|
||||
LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
|
||||
Resize();
|
||||
}
|
||||
|
||||
LRUHandleTable::~LRUHandleTable() {
|
||||
ApplyToEntriesRange(
|
||||
[](LRUHandle* h) {
|
||||
if (!h->HasRefs()) {
|
||||
h->Free();
|
||||
}
|
||||
},
|
||||
0, uint32_t{1} << length_bits_);
|
||||
ApplyToAllCacheEntries([](LRUHandle* h) {
|
||||
if (h->refs == 1) {
|
||||
h->Free();
|
||||
}
|
||||
});
|
||||
delete[] list_;
|
||||
}
|
||||
|
||||
LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
|
||||
@ -48,7 +46,7 @@ LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
|
||||
*ptr = h;
|
||||
if (old == nullptr) {
|
||||
++elems_;
|
||||
if ((elems_ >> length_bits_) > 0) { // elems_ >= length
|
||||
if (elems_ > length_) {
|
||||
// Since each cache entry is fairly large, we aim for a small
|
||||
// average linked list length (<= 1).
|
||||
Resize();
|
||||
@ -68,7 +66,7 @@ LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
|
||||
}
|
||||
|
||||
LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
|
||||
LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
|
||||
LRUHandle** ptr = &list_[hash & (length_ - 1)];
|
||||
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
|
||||
ptr = &(*ptr)->next_hash;
|
||||
}
|
||||
@ -76,28 +74,19 @@ LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
|
||||
}
|
||||
|
||||
void LRUHandleTable::Resize() {
|
||||
if (length_bits_ >= max_length_bits_) {
|
||||
// Due to reaching limit of hash information, if we made the table bigger,
|
||||
// we would allocate more addresses but only the same number would be used.
|
||||
return;
|
||||
uint32_t new_length = 16;
|
||||
while (new_length < elems_ * 1.5) {
|
||||
new_length *= 2;
|
||||
}
|
||||
if (length_bits_ >= 31) {
|
||||
// Avoid undefined behavior shifting uint32_t by 32.
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t old_length = uint32_t{1} << length_bits_;
|
||||
int new_length_bits = length_bits_ + 1;
|
||||
std::unique_ptr<LRUHandle* []> new_list {
|
||||
new LRUHandle* [size_t{1} << new_length_bits] {}
|
||||
};
|
||||
LRUHandle** new_list = new LRUHandle*[new_length];
|
||||
memset(new_list, 0, sizeof(new_list[0]) * new_length);
|
||||
uint32_t count = 0;
|
||||
for (uint32_t i = 0; i < old_length; i++) {
|
||||
for (uint32_t i = 0; i < length_; i++) {
|
||||
LRUHandle* h = list_[i];
|
||||
while (h != nullptr) {
|
||||
LRUHandle* next = h->next_hash;
|
||||
uint32_t hash = h->hash;
|
||||
LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
|
||||
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
|
||||
h->next_hash = *ptr;
|
||||
*ptr = h;
|
||||
h = next;
|
||||
@ -105,47 +94,53 @@ void LRUHandleTable::Resize() {
|
||||
}
|
||||
}
|
||||
assert(elems_ == count);
|
||||
list_ = std::move(new_list);
|
||||
length_bits_ = new_length_bits;
|
||||
delete[] list_;
|
||||
list_ = new_list;
|
||||
length_ = new_length;
|
||||
}
|
||||
|
||||
LRUCacheShard::LRUCacheShard(
|
||||
size_t capacity, bool strict_capacity_limit, double high_pri_pool_ratio,
|
||||
bool use_adaptive_mutex, CacheMetadataChargePolicy metadata_charge_policy,
|
||||
int max_upper_hash_bits,
|
||||
const std::shared_ptr<SecondaryCache>& secondary_cache)
|
||||
LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
bool use_adaptive_mutex)
|
||||
: capacity_(0),
|
||||
high_pri_pool_usage_(0),
|
||||
strict_capacity_limit_(strict_capacity_limit),
|
||||
high_pri_pool_ratio_(high_pri_pool_ratio),
|
||||
high_pri_pool_capacity_(0),
|
||||
table_(max_upper_hash_bits),
|
||||
usage_(0),
|
||||
lru_usage_(0),
|
||||
mutex_(use_adaptive_mutex),
|
||||
secondary_cache_(secondary_cache) {
|
||||
set_metadata_charge_policy(metadata_charge_policy);
|
||||
// Make empty circular linked list.
|
||||
mutex_(use_adaptive_mutex) {
|
||||
// Make empty circular linked list
|
||||
lru_.next = &lru_;
|
||||
lru_.prev = &lru_;
|
||||
lru_low_pri_ = &lru_;
|
||||
SetCapacity(capacity);
|
||||
}
|
||||
|
||||
LRUCacheShard::~LRUCacheShard() {}
|
||||
|
||||
bool LRUCacheShard::Unref(LRUHandle* e) {
|
||||
assert(e->refs > 0);
|
||||
e->refs--;
|
||||
return e->refs == 0;
|
||||
}
|
||||
|
||||
// Call deleter and free
|
||||
|
||||
void LRUCacheShard::EraseUnRefEntries() {
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
while (lru_.next != &lru_) {
|
||||
LRUHandle* old = lru_.next;
|
||||
// LRU list contains only elements which can be evicted.
|
||||
assert(old->InCache() && !old->HasRefs());
|
||||
assert(old->InCache());
|
||||
assert(old->refs ==
|
||||
1); // LRU list contains elements which may be evicted
|
||||
LRU_Remove(old);
|
||||
table_.Remove(old->key(), old->hash);
|
||||
old->SetInCache(false);
|
||||
size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
Unref(old);
|
||||
usage_ -= old->charge;
|
||||
last_reference_list.push_back(old);
|
||||
}
|
||||
}
|
||||
@ -155,50 +150,24 @@ void LRUCacheShard::EraseUnRefEntries() {
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) {
|
||||
// The state is essentially going to be the starting hash, which works
|
||||
// nicely even if we resize between calls because we use upper-most
|
||||
// hash bits for table indexes.
|
||||
MutexLock l(&mutex_);
|
||||
uint32_t length_bits = table_.GetLengthBits();
|
||||
uint32_t length = uint32_t{1} << length_bits;
|
||||
|
||||
assert(average_entries_per_lock > 0);
|
||||
// Assuming we are called with same average_entries_per_lock repeatedly,
|
||||
// this simplifies some logic (index_end will not overflow).
|
||||
assert(average_entries_per_lock < length || *state == 0);
|
||||
|
||||
uint32_t index_begin = *state >> (32 - length_bits);
|
||||
uint32_t index_end = index_begin + average_entries_per_lock;
|
||||
if (index_end >= length) {
|
||||
// Going to end
|
||||
index_end = length;
|
||||
*state = UINT32_MAX;
|
||||
} else {
|
||||
*state = index_end << (32 - length_bits);
|
||||
void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) {
|
||||
if (thread_safe) {
|
||||
mutex_.Lock();
|
||||
}
|
||||
table_.ApplyToAllCacheEntries(
|
||||
[callback](LRUHandle* h) { callback(h->value, h->charge); });
|
||||
if (thread_safe) {
|
||||
mutex_.Unlock();
|
||||
}
|
||||
|
||||
table_.ApplyToEntriesRange(
|
||||
[callback](LRUHandle* h) {
|
||||
DeleterFn deleter = h->IsSecondaryCacheCompatible()
|
||||
? h->info_.helper->del_cb
|
||||
: h->info_.deleter;
|
||||
callback(h->key(), h->value, h->charge, deleter);
|
||||
},
|
||||
index_begin, index_end);
|
||||
}
|
||||
|
||||
void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
|
||||
MutexLock l(&mutex_);
|
||||
*lru = &lru_;
|
||||
*lru_low_pri = lru_low_pri_;
|
||||
}
|
||||
|
||||
size_t LRUCacheShard::TEST_GetLRUSize() {
|
||||
MutexLock l(&mutex_);
|
||||
LRUHandle* lru_handle = lru_.next;
|
||||
size_t lru_size = 0;
|
||||
while (lru_handle != &lru_) {
|
||||
@ -222,19 +191,16 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
|
||||
e->next->prev = e->prev;
|
||||
e->prev->next = e->next;
|
||||
e->prev = e->next = nullptr;
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(lru_usage_ >= total_charge);
|
||||
lru_usage_ -= total_charge;
|
||||
lru_usage_ -= e->charge;
|
||||
if (e->InHighPriPool()) {
|
||||
assert(high_pri_pool_usage_ >= total_charge);
|
||||
high_pri_pool_usage_ -= total_charge;
|
||||
assert(high_pri_pool_usage_ >= e->charge);
|
||||
high_pri_pool_usage_ -= e->charge;
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
|
||||
assert(e->next == nullptr);
|
||||
assert(e->prev == nullptr);
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
|
||||
// Inset "e" to head of LRU list.
|
||||
e->next = &lru_;
|
||||
@ -242,7 +208,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
|
||||
e->prev->next = e;
|
||||
e->next->prev = e;
|
||||
e->SetInHighPriPool(true);
|
||||
high_pri_pool_usage_ += total_charge;
|
||||
high_pri_pool_usage_ += e->charge;
|
||||
MaintainPoolSize();
|
||||
} else {
|
||||
// Insert "e" to the head of low-pri pool. Note that when
|
||||
@ -254,7 +220,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
|
||||
e->SetInHighPriPool(false);
|
||||
lru_low_pri_ = e;
|
||||
}
|
||||
lru_usage_ += total_charge;
|
||||
lru_usage_ += e->charge;
|
||||
}
|
||||
|
||||
void LRUCacheShard::MaintainPoolSize() {
|
||||
@ -263,25 +229,21 @@ void LRUCacheShard::MaintainPoolSize() {
|
||||
lru_low_pri_ = lru_low_pri_->next;
|
||||
assert(lru_low_pri_ != &lru_);
|
||||
lru_low_pri_->SetInHighPriPool(false);
|
||||
size_t total_charge =
|
||||
lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(high_pri_pool_usage_ >= total_charge);
|
||||
high_pri_pool_usage_ -= total_charge;
|
||||
high_pri_pool_usage_ -= lru_low_pri_->charge;
|
||||
}
|
||||
}
|
||||
|
||||
void LRUCacheShard::EvictFromLRU(size_t charge,
|
||||
autovector<LRUHandle*>* deleted) {
|
||||
while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
|
||||
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
|
||||
LRUHandle* old = lru_.next;
|
||||
// LRU list contains only elements which can be evicted.
|
||||
assert(old->InCache() && !old->HasRefs());
|
||||
assert(old->InCache());
|
||||
assert(old->refs == 1); // LRU list contains elements which may be evicted
|
||||
LRU_Remove(old);
|
||||
table_.Remove(old->key(), old->hash);
|
||||
old->SetInCache(false);
|
||||
size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= old_total_charge);
|
||||
usage_ -= old_total_charge;
|
||||
Unref(old);
|
||||
usage_ -= old->charge;
|
||||
deleted->push_back(old);
|
||||
}
|
||||
}
|
||||
@ -294,15 +256,9 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
|
||||
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
|
||||
EvictFromLRU(0, &last_reference_list);
|
||||
}
|
||||
|
||||
// Try to insert the evicted entries into tiered cache.
|
||||
// Free the entries outside of mutex for performance reasons.
|
||||
// we free the entries here outside of mutex for
|
||||
// performance reasons
|
||||
for (auto entry : last_reference_list) {
|
||||
if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
|
||||
!entry->IsInSecondaryCache()) {
|
||||
secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
|
||||
.PermitUncheckedError();
|
||||
}
|
||||
entry->Free();
|
||||
}
|
||||
}
|
||||
@ -312,193 +268,27 @@ void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
|
||||
strict_capacity_limit_ = strict_capacity_limit;
|
||||
}
|
||||
|
||||
Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
|
||||
bool free_handle_on_fail) {
|
||||
Status s = Status::OK();
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
|
||||
// Free the space following strict LRU policy until enough space
|
||||
// is freed or the lru list is empty.
|
||||
EvictFromLRU(total_charge, &last_reference_list);
|
||||
|
||||
if ((usage_ + total_charge) > capacity_ &&
|
||||
(strict_capacity_limit_ || handle == nullptr)) {
|
||||
e->SetInCache(false);
|
||||
if (handle == nullptr) {
|
||||
// Don't insert the entry but still return ok, as if the entry inserted
|
||||
// into cache and get evicted immediately.
|
||||
last_reference_list.push_back(e);
|
||||
} else {
|
||||
if (free_handle_on_fail) {
|
||||
delete[] reinterpret_cast<char*>(e);
|
||||
*handle = nullptr;
|
||||
}
|
||||
s = Status::Incomplete("Insert failed due to LRU cache being full.");
|
||||
}
|
||||
} else {
|
||||
// Insert into the cache. Note that the cache might get larger than its
|
||||
// capacity if not enough space was freed up.
|
||||
LRUHandle* old = table_.Insert(e);
|
||||
usage_ += total_charge;
|
||||
if (old != nullptr) {
|
||||
s = Status::OkOverwritten();
|
||||
assert(old->InCache());
|
||||
old->SetInCache(false);
|
||||
if (!old->HasRefs()) {
|
||||
// old is on LRU because it's in cache and its reference count is 0.
|
||||
LRU_Remove(old);
|
||||
size_t old_total_charge =
|
||||
old->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= old_total_charge);
|
||||
usage_ -= old_total_charge;
|
||||
last_reference_list.push_back(old);
|
||||
}
|
||||
}
|
||||
if (handle == nullptr) {
|
||||
LRU_Insert(e);
|
||||
} else {
|
||||
// If caller already holds a ref, no need to take one here.
|
||||
if (!e->HasRefs()) {
|
||||
e->Ref();
|
||||
}
|
||||
*handle = reinterpret_cast<Cache::Handle*>(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try to insert the evicted entries into the secondary cache.
|
||||
// Free the entries here outside of mutex for performance reasons.
|
||||
for (auto entry : last_reference_list) {
|
||||
if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
|
||||
!entry->IsInSecondaryCache()) {
|
||||
secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
|
||||
.PermitUncheckedError();
|
||||
}
|
||||
entry->Free();
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
void LRUCacheShard::Promote(LRUHandle* e) {
|
||||
SecondaryCacheResultHandle* secondary_handle = e->sec_handle;
|
||||
|
||||
assert(secondary_handle->IsReady());
|
||||
e->SetIncomplete(false);
|
||||
e->SetInCache(true);
|
||||
e->value = secondary_handle->Value();
|
||||
e->charge = secondary_handle->Size();
|
||||
delete secondary_handle;
|
||||
|
||||
// This call could fail if the cache is over capacity and
|
||||
// strict_capacity_limit_ is true. In such a case, we don't want
|
||||
// InsertItem() to free the handle, since the item is already in memory
|
||||
// and the caller will most likely just read from disk if we erase it here.
|
||||
if (e->value) {
|
||||
Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(e);
|
||||
Status s = InsertItem(e, &handle, /*free_handle_on_fail=*/false);
|
||||
if (!s.ok()) {
|
||||
// Item is in memory, but not accounted against the cache capacity.
|
||||
// When the handle is released, the item should get deleted.
|
||||
assert(!e->InCache());
|
||||
}
|
||||
} else {
|
||||
// Since the secondary cache lookup failed, mark the item as not in cache
|
||||
// Don't charge the cache as its only metadata that'll shortly be released
|
||||
MutexLock l(&mutex_);
|
||||
e->charge = 0;
|
||||
e->SetInCache(false);
|
||||
}
|
||||
}
|
||||
|
||||
Cache::Handle* LRUCacheShard::Lookup(
|
||||
const Slice& key, uint32_t hash,
|
||||
const ShardedCache::CacheItemHelper* helper,
|
||||
const ShardedCache::CreateCallback& create_cb, Cache::Priority priority,
|
||||
bool wait, Statistics* stats) {
|
||||
LRUHandle* e = nullptr;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
e = table_.Lookup(key, hash);
|
||||
if (e != nullptr) {
|
||||
assert(e->InCache());
|
||||
if (!e->HasRefs()) {
|
||||
// The entry is in LRU since it's in hash and has no external references
|
||||
LRU_Remove(e);
|
||||
}
|
||||
e->Ref();
|
||||
e->SetHit();
|
||||
}
|
||||
}
|
||||
|
||||
// If handle table lookup failed, then allocate a handle outside the
|
||||
// mutex if we're going to lookup in the secondary cache.
|
||||
// Only support synchronous for now.
|
||||
// TODO: Support asynchronous lookup in secondary cache
|
||||
if (!e && secondary_cache_ && helper && helper->saveto_cb) {
|
||||
// For objects from the secondary cache, we expect the caller to provide
|
||||
// a way to create/delete the primary cache object. The only case where
|
||||
// a deleter would not be required is for dummy entries inserted for
|
||||
// accounting purposes, which we won't demote to the secondary cache
|
||||
// anyway.
|
||||
assert(create_cb && helper->del_cb);
|
||||
bool is_in_sec_cache{false};
|
||||
std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
|
||||
secondary_cache_->Lookup(key, create_cb, wait, is_in_sec_cache);
|
||||
if (secondary_handle != nullptr) {
|
||||
e = reinterpret_cast<LRUHandle*>(
|
||||
new char[sizeof(LRUHandle) - 1 + key.size()]);
|
||||
|
||||
e->flags = 0;
|
||||
e->SetSecondaryCacheCompatible(true);
|
||||
e->info_.helper = helper;
|
||||
e->key_length = key.size();
|
||||
e->hash = hash;
|
||||
e->refs = 0;
|
||||
e->next = e->prev = nullptr;
|
||||
e->SetPriority(priority);
|
||||
memcpy(e->key_data, key.data(), key.size());
|
||||
e->value = nullptr;
|
||||
e->sec_handle = secondary_handle.release();
|
||||
e->Ref();
|
||||
|
||||
if (wait) {
|
||||
Promote(e);
|
||||
e->SetIsInSecondaryCache(is_in_sec_cache);
|
||||
if (!e->value) {
|
||||
// The secondary cache returned a handle, but the lookup failed.
|
||||
e->Unref();
|
||||
e->Free();
|
||||
e = nullptr;
|
||||
} else {
|
||||
PERF_COUNTER_ADD(secondary_cache_hit_count, 1);
|
||||
RecordTick(stats, SECONDARY_CACHE_HITS);
|
||||
}
|
||||
} else {
|
||||
// If wait is false, we always return a handle and let the caller
|
||||
// release the handle after checking for success or failure.
|
||||
e->SetIncomplete(true);
|
||||
e->SetIsInSecondaryCache(is_in_sec_cache);
|
||||
// This may be slightly inaccurate, if the lookup eventually fails.
|
||||
// But the probability is very low.
|
||||
PERF_COUNTER_ADD(secondary_cache_hit_count, 1);
|
||||
RecordTick(stats, SECONDARY_CACHE_HITS);
|
||||
}
|
||||
Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
|
||||
MutexLock l(&mutex_);
|
||||
LRUHandle* e = table_.Lookup(key, hash);
|
||||
if (e != nullptr) {
|
||||
assert(e->InCache());
|
||||
if (e->refs == 1) {
|
||||
LRU_Remove(e);
|
||||
}
|
||||
e->refs++;
|
||||
e->SetHit();
|
||||
}
|
||||
return reinterpret_cast<Cache::Handle*>(e);
|
||||
}
|
||||
|
||||
bool LRUCacheShard::Ref(Cache::Handle* h) {
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
|
||||
LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
|
||||
MutexLock l(&mutex_);
|
||||
// To create another reference - entry must be already externally referenced.
|
||||
assert(e->HasRefs());
|
||||
e->Ref();
|
||||
if (handle->InCache() && handle->refs == 1) {
|
||||
LRU_Remove(handle);
|
||||
}
|
||||
handle->refs++;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -509,7 +299,7 @@ void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
|
||||
MaintainPoolSize();
|
||||
}
|
||||
|
||||
bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
|
||||
bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
|
||||
if (handle == nullptr) {
|
||||
return false;
|
||||
}
|
||||
@ -517,34 +307,30 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
|
||||
bool last_reference = false;
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
last_reference = e->Unref();
|
||||
if (last_reference && e->InCache()) {
|
||||
// The item is still in cache, and nobody else holds a reference to it.
|
||||
if (usage_ > capacity_ || erase_if_last_ref) {
|
||||
// The LRU list must be empty since the cache is full.
|
||||
assert(lru_.next == &lru_ || erase_if_last_ref);
|
||||
// Take this opportunity and remove the item.
|
||||
last_reference = Unref(e);
|
||||
if (last_reference) {
|
||||
usage_ -= e->charge;
|
||||
}
|
||||
if (e->refs == 1 && e->InCache()) {
|
||||
// The item is still in cache, and nobody else holds a reference to it
|
||||
if (usage_ > capacity_ || force_erase) {
|
||||
// the cache is full
|
||||
// The LRU list must be empty since the cache is full
|
||||
assert(!(usage_ > capacity_) || lru_.next == &lru_);
|
||||
// take this opportunity and remove the item
|
||||
table_.Remove(e->key(), e->hash);
|
||||
e->SetInCache(false);
|
||||
Unref(e);
|
||||
usage_ -= e->charge;
|
||||
last_reference = true;
|
||||
} else {
|
||||
// Put the item back on the LRU list, and don't free it.
|
||||
// put the item on the list to be potentially freed
|
||||
LRU_Insert(e);
|
||||
last_reference = false;
|
||||
}
|
||||
}
|
||||
// If it was the last reference, and the entry is either not secondary
|
||||
// cache compatible (i.e a dummy entry for accounting), or is secondary
|
||||
// cache compatible and has a non-null value, then decrement the cache
|
||||
// usage. If value is null in the latter case, taht means the lookup
|
||||
// failed and we didn't charge the cache.
|
||||
if (last_reference && (!e->IsSecondaryCacheCompatible() || e->value)) {
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entry here outside of mutex for performance reasons.
|
||||
// free outside of mutex
|
||||
if (last_reference) {
|
||||
e->Free();
|
||||
}
|
||||
@ -554,35 +340,79 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
|
||||
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
|
||||
size_t charge,
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
const Cache::CacheItemHelper* helper,
|
||||
Cache::Handle** handle, Cache::Priority priority) {
|
||||
// Allocate the memory here outside of the mutex.
|
||||
// If the cache is full, we'll have to release it.
|
||||
// Allocate the memory here outside of the mutex
|
||||
// If the cache is full, we'll have to release it
|
||||
// It shouldn't happen very often though.
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(
|
||||
new char[sizeof(LRUHandle) - 1 + key.size()]);
|
||||
Status s;
|
||||
autovector<LRUHandle*> last_reference_list;
|
||||
|
||||
e->value = value;
|
||||
e->flags = 0;
|
||||
if (helper) {
|
||||
e->SetSecondaryCacheCompatible(true);
|
||||
e->info_.helper = helper;
|
||||
} else {
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
e->is_secondary_cache_compatible_for_tsan = false;
|
||||
#endif // __SANITIZE_THREAD__
|
||||
e->info_.deleter = deleter;
|
||||
}
|
||||
e->deleter = deleter;
|
||||
e->charge = charge;
|
||||
e->key_length = key.size();
|
||||
e->flags = 0;
|
||||
e->hash = hash;
|
||||
e->refs = 0;
|
||||
e->refs = (handle == nullptr
|
||||
? 1
|
||||
: 2); // One from LRUCache, one for the returned handle
|
||||
e->next = e->prev = nullptr;
|
||||
e->SetInCache(true);
|
||||
e->SetPriority(priority);
|
||||
memcpy(e->key_data, key.data(), key.size());
|
||||
|
||||
return InsertItem(e, handle, /* free_handle_on_fail */ true);
|
||||
{
|
||||
MutexLock l(&mutex_);
|
||||
|
||||
// Free the space following strict LRU policy until enough space
|
||||
// is freed or the lru list is empty
|
||||
EvictFromLRU(charge, &last_reference_list);
|
||||
|
||||
if (usage_ - lru_usage_ + charge > capacity_ &&
|
||||
(strict_capacity_limit_ || handle == nullptr)) {
|
||||
if (handle == nullptr) {
|
||||
// Don't insert the entry but still return ok, as if the entry inserted
|
||||
// into cache and get evicted immediately.
|
||||
last_reference_list.push_back(e);
|
||||
} else {
|
||||
delete[] reinterpret_cast<char*>(e);
|
||||
*handle = nullptr;
|
||||
s = Status::Incomplete("Insert failed due to LRU cache being full.");
|
||||
}
|
||||
} else {
|
||||
// insert into the cache
|
||||
// note that the cache might get larger than its capacity if not enough
|
||||
// space was freed
|
||||
LRUHandle* old = table_.Insert(e);
|
||||
usage_ += e->charge;
|
||||
if (old != nullptr) {
|
||||
old->SetInCache(false);
|
||||
if (Unref(old)) {
|
||||
usage_ -= old->charge;
|
||||
// old is on LRU because it's in cache and its reference count
|
||||
// was just 1 (Unref returned 0)
|
||||
LRU_Remove(old);
|
||||
last_reference_list.push_back(old);
|
||||
}
|
||||
}
|
||||
if (handle == nullptr) {
|
||||
LRU_Insert(e);
|
||||
} else {
|
||||
*handle = reinterpret_cast<Cache::Handle*>(e);
|
||||
}
|
||||
s = Status::OK();
|
||||
}
|
||||
}
|
||||
|
||||
// we free the entries here outside of mutex for
|
||||
// performance reasons
|
||||
for (auto entry : last_reference_list) {
|
||||
entry->Free();
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
|
||||
@ -592,38 +422,24 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
|
||||
MutexLock l(&mutex_);
|
||||
e = table_.Remove(key, hash);
|
||||
if (e != nullptr) {
|
||||
assert(e->InCache());
|
||||
e->SetInCache(false);
|
||||
if (!e->HasRefs()) {
|
||||
// The entry is in LRU since it's in hash and has no external references
|
||||
LRU_Remove(e);
|
||||
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
|
||||
assert(usage_ >= total_charge);
|
||||
usage_ -= total_charge;
|
||||
last_reference = true;
|
||||
last_reference = Unref(e);
|
||||
if (last_reference) {
|
||||
usage_ -= e->charge;
|
||||
}
|
||||
if (last_reference && e->InCache()) {
|
||||
LRU_Remove(e);
|
||||
}
|
||||
e->SetInCache(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Free the entry here outside of mutex for performance reasons.
|
||||
// last_reference will only be true if e != nullptr.
|
||||
// mutex not held here
|
||||
// last_reference will only be true if e != nullptr
|
||||
if (last_reference) {
|
||||
e->Free();
|
||||
}
|
||||
}
|
||||
|
||||
bool LRUCacheShard::IsReady(Cache::Handle* handle) {
|
||||
LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
|
||||
MutexLock l(&mutex_);
|
||||
bool ready = true;
|
||||
if (e->IsPending()) {
|
||||
assert(secondary_cache_);
|
||||
assert(e->sec_handle);
|
||||
ready = e->sec_handle->IsReady();
|
||||
}
|
||||
return ready;
|
||||
}
|
||||
|
||||
size_t LRUCacheShard::GetUsage() const {
|
||||
MutexLock l(&mutex_);
|
||||
return usage_;
|
||||
@ -649,9 +465,7 @@ std::string LRUCacheShard::GetPrintableOptions() const {
|
||||
LRUCache::LRUCache(size_t capacity, int num_shard_bits,
|
||||
bool strict_capacity_limit, double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> allocator,
|
||||
bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
const std::shared_ptr<SecondaryCache>& secondary_cache)
|
||||
bool use_adaptive_mutex)
|
||||
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
|
||||
std::move(allocator)) {
|
||||
num_shards_ = 1 << num_shard_bits;
|
||||
@ -659,12 +473,10 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
|
||||
port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
|
||||
size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
|
||||
for (int i = 0; i < num_shards_; i++) {
|
||||
new (&shards_[i]) LRUCacheShard(
|
||||
per_shard, strict_capacity_limit, high_pri_pool_ratio,
|
||||
use_adaptive_mutex, metadata_charge_policy,
|
||||
/* max_upper_hash_bits */ 32 - num_shard_bits, secondary_cache);
|
||||
new (&shards_[i])
|
||||
LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio,
|
||||
use_adaptive_mutex);
|
||||
}
|
||||
secondary_cache_ = secondary_cache;
|
||||
}
|
||||
|
||||
LRUCache::~LRUCache() {
|
||||
@ -677,11 +489,11 @@ LRUCache::~LRUCache() {
|
||||
}
|
||||
}
|
||||
|
||||
CacheShard* LRUCache::GetShard(uint32_t shard) {
|
||||
CacheShard* LRUCache::GetShard(int shard) {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
const CacheShard* LRUCache::GetShard(uint32_t shard) const {
|
||||
const CacheShard* LRUCache::GetShard(int shard) const {
|
||||
return reinterpret_cast<CacheShard*>(&shards_[shard]);
|
||||
}
|
||||
|
||||
@ -693,25 +505,23 @@ size_t LRUCache::GetCharge(Handle* handle) const {
|
||||
return reinterpret_cast<const LRUHandle*>(handle)->charge;
|
||||
}
|
||||
|
||||
Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
|
||||
auto h = reinterpret_cast<const LRUHandle*>(handle);
|
||||
if (h->IsSecondaryCacheCompatible()) {
|
||||
return h->info_.helper->del_cb;
|
||||
} else {
|
||||
return h->info_.deleter;
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t LRUCache::GetHash(Handle* handle) const {
|
||||
return reinterpret_cast<const LRUHandle*>(handle)->hash;
|
||||
}
|
||||
|
||||
void LRUCache::DisownData() {
|
||||
// Leak data only if that won't generate an ASAN/valgrind warning.
|
||||
if (!kMustFreeHeapAllocations) {
|
||||
shards_ = nullptr;
|
||||
num_shards_ = 0;
|
||||
}
|
||||
// Do not drop data if compile with ASAN to suppress leak warning.
|
||||
#if defined(__clang__)
|
||||
#if !defined(__has_feature) || !__has_feature(address_sanitizer)
|
||||
shards_ = nullptr;
|
||||
num_shards_ = 0;
|
||||
#endif
|
||||
#else // __clang__
|
||||
#ifndef __SANITIZE_ADDRESS__
|
||||
shards_ = nullptr;
|
||||
num_shards_ = 0;
|
||||
#endif // !__SANITIZE_ADDRESS__
|
||||
#endif // __clang__
|
||||
}
|
||||
|
||||
size_t LRUCache::TEST_GetLRUSize() {
|
||||
@ -730,75 +540,33 @@ double LRUCache::GetHighPriPoolRatio() {
|
||||
return result;
|
||||
}
|
||||
|
||||
void LRUCache::WaitAll(std::vector<Handle*>& handles) {
|
||||
if (secondary_cache_) {
|
||||
std::vector<SecondaryCacheResultHandle*> sec_handles;
|
||||
sec_handles.reserve(handles.size());
|
||||
for (Handle* handle : handles) {
|
||||
if (!handle) {
|
||||
continue;
|
||||
}
|
||||
LRUHandle* lru_handle = reinterpret_cast<LRUHandle*>(handle);
|
||||
if (!lru_handle->IsPending()) {
|
||||
continue;
|
||||
}
|
||||
sec_handles.emplace_back(lru_handle->sec_handle);
|
||||
}
|
||||
secondary_cache_->WaitAll(sec_handles);
|
||||
for (Handle* handle : handles) {
|
||||
if (!handle) {
|
||||
continue;
|
||||
}
|
||||
LRUHandle* lru_handle = reinterpret_cast<LRUHandle*>(handle);
|
||||
if (!lru_handle->IsPending()) {
|
||||
continue;
|
||||
}
|
||||
uint32_t hash = GetHash(handle);
|
||||
LRUCacheShard* shard = static_cast<LRUCacheShard*>(GetShard(Shard(hash)));
|
||||
shard->Promote(lru_handle);
|
||||
}
|
||||
}
|
||||
std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
|
||||
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
|
||||
cache_opts.strict_capacity_limit,
|
||||
cache_opts.high_pri_pool_ratio,
|
||||
cache_opts.memory_allocator,
|
||||
cache_opts.use_adaptive_mutex);
|
||||
}
|
||||
|
||||
} // namespace lru_cache
|
||||
|
||||
std::shared_ptr<Cache> NewLRUCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
const std::shared_ptr<SecondaryCache>& secondary_cache) {
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator,
|
||||
bool use_adaptive_mutex) {
|
||||
if (num_shard_bits >= 20) {
|
||||
return nullptr; // The cache cannot be sharded into too many fine pieces.
|
||||
return nullptr; // the cache cannot be sharded into too many fine pieces
|
||||
}
|
||||
if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
|
||||
// Invalid high_pri_pool_ratio
|
||||
// invalid high_pri_pool_ratio
|
||||
return nullptr;
|
||||
}
|
||||
if (num_shard_bits < 0) {
|
||||
num_shard_bits = GetDefaultCacheShardBits(capacity);
|
||||
}
|
||||
return std::make_shared<LRUCache>(
|
||||
capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
|
||||
std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
|
||||
secondary_cache);
|
||||
return std::make_shared<LRUCache>(capacity, num_shard_bits,
|
||||
strict_capacity_limit, high_pri_pool_ratio,
|
||||
std::move(memory_allocator),
|
||||
use_adaptive_mutex);
|
||||
}
|
||||
|
||||
std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
|
||||
return NewLRUCache(
|
||||
cache_opts.capacity, cache_opts.num_shard_bits,
|
||||
cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
|
||||
cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
|
||||
cache_opts.metadata_charge_policy, cache_opts.secondary_cache);
|
||||
}
|
||||
|
||||
std::shared_ptr<Cache> NewLRUCache(
|
||||
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
|
||||
high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
|
||||
metadata_charge_policy, nullptr);
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
311
cache/lru_cache.h
vendored
311
cache/lru_cache.h
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
@ -8,129 +8,84 @@
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "cache/sharded_cache.h"
|
||||
#include "port/lang.h"
|
||||
#include "port/malloc.h"
|
||||
|
||||
#include "port/port.h"
|
||||
#include "rocksdb/secondary_cache.h"
|
||||
#include "util/autovector.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace lru_cache {
|
||||
namespace rocksdb {
|
||||
|
||||
// LRU cache implementation. This class is not thread-safe.
|
||||
// LRU cache implementation
|
||||
|
||||
// An entry is a variable length heap-allocated structure.
|
||||
// Entries are referenced by cache and/or by any external entity.
|
||||
// The cache keeps all its entries in a hash table. Some elements
|
||||
// The cache keeps all its entries in table. Some elements
|
||||
// are also stored on LRU list.
|
||||
//
|
||||
// LRUHandle can be in these states:
|
||||
// 1. Referenced externally AND in hash table.
|
||||
// In that case the entry is *not* in the LRU list
|
||||
// (refs >= 1 && in_cache == true)
|
||||
// 2. Not referenced externally AND in hash table.
|
||||
// In that case the entry is in the LRU list and can be freed.
|
||||
// (refs == 0 && in_cache == true)
|
||||
// 3. Referenced externally AND not in hash table.
|
||||
// In that case the entry is not in the LRU list and not in hash table.
|
||||
// The entry can be freed when refs becomes 0.
|
||||
// (refs >= 1 && in_cache == false)
|
||||
// In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
|
||||
// 2. Not referenced externally and in hash table. In that case the entry is
|
||||
// in the LRU and can be freed. (refs == 1 && in_cache == true)
|
||||
// 3. Referenced externally and not in hash table. In that case the entry is
|
||||
// in not on LRU and not in table. (refs >= 1 && in_cache == false)
|
||||
//
|
||||
// All newly created LRUHandles are in state 1. If you call
|
||||
// LRUCacheShard::Release on entry in state 1, it will go into state 2.
|
||||
// To move from state 1 to state 3, either call LRUCacheShard::Erase or
|
||||
// LRUCacheShard::Insert with the same key (but possibly different value).
|
||||
// LRUCacheShard::Release
|
||||
// on entry in state 1, it will go into state 2. To move from state 1 to
|
||||
// state 3, either call LRUCacheShard::Erase or LRUCacheShard::Insert with the
|
||||
// same key.
|
||||
// To move from state 2 to state 1, use LRUCacheShard::Lookup.
|
||||
// Before destruction, make sure that no handles are in state 1. This means
|
||||
// that any successful LRUCacheShard::Lookup/LRUCacheShard::Insert have a
|
||||
// matching LRUCache::Release (to move into state 2) or LRUCacheShard::Erase
|
||||
// (to move into state 3).
|
||||
// matching
|
||||
// RUCache::Release (to move into state 2) or LRUCacheShard::Erase (for state 3)
|
||||
|
||||
struct LRUHandle {
|
||||
void* value;
|
||||
union Info {
|
||||
Info() {}
|
||||
~Info() {}
|
||||
Cache::DeleterFn deleter;
|
||||
const ShardedCache::CacheItemHelper* helper;
|
||||
} info_;
|
||||
// An entry is not added to the LRUHandleTable until the secondary cache
|
||||
// lookup is complete, so its safe to have this union.
|
||||
union {
|
||||
LRUHandle* next_hash;
|
||||
SecondaryCacheResultHandle* sec_handle;
|
||||
};
|
||||
void (*deleter)(const Slice&, void* value);
|
||||
LRUHandle* next_hash;
|
||||
LRUHandle* next;
|
||||
LRUHandle* prev;
|
||||
size_t charge; // TODO(opt): Only allow uint32_t?
|
||||
size_t key_length;
|
||||
// The hash of key(). Used for fast sharding and comparisons.
|
||||
uint32_t hash;
|
||||
// The number of external refs to this entry. The cache itself is not counted.
|
||||
uint32_t refs;
|
||||
uint32_t refs; // a number of refs to this entry
|
||||
// cache itself is counted as 1
|
||||
|
||||
// Include the following flags:
|
||||
// IN_CACHE: whether this entry is referenced by the hash table.
|
||||
// IS_HIGH_PRI: whether this entry is high priority entry.
|
||||
// IN_HIGH_PRI_POOL: whether this entry is in high-pri pool.
|
||||
// HAS_HIT: whether this entry has had any lookups (hits).
|
||||
enum Flags : uint8_t {
|
||||
// Whether this entry is referenced by the hash table.
|
||||
IN_CACHE = (1 << 0),
|
||||
// Whether this entry is high priority entry.
|
||||
IS_HIGH_PRI = (1 << 1),
|
||||
// Whether this entry is in high-pri pool.
|
||||
IN_HIGH_PRI_POOL = (1 << 2),
|
||||
// Whether this entry has had any lookups (hits).
|
||||
HAS_HIT = (1 << 3),
|
||||
// Can this be inserted into the secondary cache.
|
||||
IS_SECONDARY_CACHE_COMPATIBLE = (1 << 4),
|
||||
// Is the handle still being read from a lower tier.
|
||||
IS_PENDING = (1 << 5),
|
||||
// Whether this handle is still in a lower tier
|
||||
IS_IN_SECONDARY_CACHE = (1 << 6),
|
||||
};
|
||||
|
||||
uint8_t flags;
|
||||
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
// TSAN can report a false data race on flags, where one thread is writing
|
||||
// to one of the mutable bits and another thread is reading this immutable
|
||||
// bit. So precisely suppress that TSAN warning, we separate out this bit
|
||||
// during TSAN runs.
|
||||
bool is_secondary_cache_compatible_for_tsan;
|
||||
#endif // __SANITIZE_THREAD__
|
||||
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
|
||||
|
||||
// Beginning of the key (MUST BE THE LAST FIELD IN THIS STRUCT!)
|
||||
char key_data[1];
|
||||
char key_data[1]; // Beginning of key
|
||||
|
||||
Slice key() const { return Slice(key_data, key_length); }
|
||||
|
||||
// Increase the reference count by 1.
|
||||
void Ref() { refs++; }
|
||||
|
||||
// Just reduce the reference count by 1. Return true if it was last reference.
|
||||
bool Unref() {
|
||||
assert(refs > 0);
|
||||
refs--;
|
||||
return refs == 0;
|
||||
Slice key() const {
|
||||
// For cheaper lookups, we allow a temporary Handle object
|
||||
// to store a pointer to a key in "value".
|
||||
if (next == this) {
|
||||
return *(reinterpret_cast<Slice*>(value));
|
||||
} else {
|
||||
return Slice(key_data, key_length);
|
||||
}
|
||||
}
|
||||
|
||||
// Return true if there are external refs, false otherwise.
|
||||
bool HasRefs() const { return refs > 0; }
|
||||
|
||||
bool InCache() const { return flags & IN_CACHE; }
|
||||
bool IsHighPri() const { return flags & IS_HIGH_PRI; }
|
||||
bool InHighPriPool() const { return flags & IN_HIGH_PRI_POOL; }
|
||||
bool HasHit() const { return flags & HAS_HIT; }
|
||||
bool IsSecondaryCacheCompatible() const {
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
return is_secondary_cache_compatible_for_tsan;
|
||||
#else
|
||||
return flags & IS_SECONDARY_CACHE_COMPATIBLE;
|
||||
#endif // __SANITIZE_THREAD__
|
||||
}
|
||||
bool IsPending() const { return flags & IS_PENDING; }
|
||||
bool IsInSecondaryCache() const { return flags & IS_IN_SECONDARY_CACHE; }
|
||||
|
||||
void SetInCache(bool in_cache) {
|
||||
if (in_cache) {
|
||||
@ -158,71 +113,13 @@ struct LRUHandle {
|
||||
|
||||
void SetHit() { flags |= HAS_HIT; }
|
||||
|
||||
void SetSecondaryCacheCompatible(bool compat) {
|
||||
if (compat) {
|
||||
flags |= IS_SECONDARY_CACHE_COMPATIBLE;
|
||||
} else {
|
||||
flags &= ~IS_SECONDARY_CACHE_COMPATIBLE;
|
||||
}
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
is_secondary_cache_compatible_for_tsan = compat;
|
||||
#endif // __SANITIZE_THREAD__
|
||||
}
|
||||
|
||||
void SetIncomplete(bool incomp) {
|
||||
if (incomp) {
|
||||
flags |= IS_PENDING;
|
||||
} else {
|
||||
flags &= ~IS_PENDING;
|
||||
}
|
||||
}
|
||||
|
||||
void SetIsInSecondaryCache(bool is_in_secondary_cache) {
|
||||
if (is_in_secondary_cache) {
|
||||
flags |= IS_IN_SECONDARY_CACHE;
|
||||
} else {
|
||||
flags &= ~IS_IN_SECONDARY_CACHE;
|
||||
}
|
||||
}
|
||||
|
||||
void Free() {
|
||||
assert(refs == 0);
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
// Here we can safely assert they are the same without a data race reported
|
||||
assert(((flags & IS_SECONDARY_CACHE_COMPATIBLE) != 0) ==
|
||||
is_secondary_cache_compatible_for_tsan);
|
||||
#endif // __SANITIZE_THREAD__
|
||||
if (!IsSecondaryCacheCompatible() && info_.deleter) {
|
||||
(*info_.deleter)(key(), value);
|
||||
} else if (IsSecondaryCacheCompatible()) {
|
||||
if (IsPending()) {
|
||||
assert(sec_handle != nullptr);
|
||||
SecondaryCacheResultHandle* tmp_sec_handle = sec_handle;
|
||||
tmp_sec_handle->Wait();
|
||||
value = tmp_sec_handle->Value();
|
||||
delete tmp_sec_handle;
|
||||
}
|
||||
if (value) {
|
||||
(*info_.helper->del_cb)(key(), value);
|
||||
}
|
||||
assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
|
||||
if (deleter) {
|
||||
(*deleter)(key(), value);
|
||||
}
|
||||
delete[] reinterpret_cast<char*>(this);
|
||||
}
|
||||
|
||||
// Calculate the memory usage by metadata.
|
||||
inline size_t CalcTotalCharge(
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
size_t meta_charge = 0;
|
||||
if (metadata_charge_policy == kFullChargeCacheMetadata) {
|
||||
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
|
||||
meta_charge += malloc_usable_size(static_cast<void*>(this));
|
||||
#else
|
||||
// This is the size that is used when a new handle is created.
|
||||
meta_charge += sizeof(LRUHandle) - 1 + key_length;
|
||||
#endif
|
||||
}
|
||||
return charge + meta_charge;
|
||||
}
|
||||
};
|
||||
|
||||
// We provide our own simple hash table since it removes a whole bunch
|
||||
@ -232,10 +129,7 @@ struct LRUHandle {
|
||||
// 4.4.3's builtin hashtable.
|
||||
class LRUHandleTable {
|
||||
public:
|
||||
// If the table uses more hash bits than `max_upper_hash_bits`,
|
||||
// it will eat into the bits used for sharding, which are constant
|
||||
// for a given LRUHandleTable.
|
||||
explicit LRUHandleTable(int max_upper_hash_bits);
|
||||
LRUHandleTable();
|
||||
~LRUHandleTable();
|
||||
|
||||
LRUHandle* Lookup(const Slice& key, uint32_t hash);
|
||||
@ -243,8 +137,8 @@ class LRUHandleTable {
|
||||
LRUHandle* Remove(const Slice& key, uint32_t hash);
|
||||
|
||||
template <typename T>
|
||||
void ApplyToEntriesRange(T func, uint32_t index_begin, uint32_t index_end) {
|
||||
for (uint32_t i = index_begin; i < index_end; i++) {
|
||||
void ApplyToAllCacheEntries(T func) {
|
||||
for (uint32_t i = 0; i < length_; i++) {
|
||||
LRUHandle* h = list_[i];
|
||||
while (h != nullptr) {
|
||||
auto n = h->next_hash;
|
||||
@ -255,8 +149,6 @@ class LRUHandleTable {
|
||||
}
|
||||
}
|
||||
|
||||
int GetLengthBits() const { return length_bits_; }
|
||||
|
||||
private:
|
||||
// Return a pointer to slot that points to a cache entry that
|
||||
// matches key/hash. If there is no such cache entry, return a
|
||||
@ -265,34 +157,23 @@ class LRUHandleTable {
|
||||
|
||||
void Resize();
|
||||
|
||||
// Number of hash bits (upper because lower bits used for sharding)
|
||||
// used for table index. Length == 1 << length_bits_
|
||||
int length_bits_;
|
||||
|
||||
// The table consists of an array of buckets where each bucket is
|
||||
// a linked list of cache entries that hash into the bucket.
|
||||
std::unique_ptr<LRUHandle*[]> list_;
|
||||
|
||||
// Number of elements currently in the table.
|
||||
LRUHandle** list_;
|
||||
uint32_t length_;
|
||||
uint32_t elems_;
|
||||
|
||||
// Set from max_upper_hash_bits (see constructor).
|
||||
const int max_length_bits_;
|
||||
};
|
||||
|
||||
// A single shard of sharded cache.
|
||||
class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
public:
|
||||
LRUCacheShard(size_t capacity, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio, bool use_adaptive_mutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy,
|
||||
int max_upper_hash_bits,
|
||||
const std::shared_ptr<SecondaryCache>& secondary_cache);
|
||||
virtual ~LRUCacheShard() override = default;
|
||||
double high_pri_pool_ratio, bool use_adaptive_mutex);
|
||||
virtual ~LRUCacheShard();
|
||||
|
||||
// Separate from constructor so caller can easily make an array of LRUCache
|
||||
// if current usage is more than new capacity, the function will attempt to
|
||||
// free the needed space.
|
||||
// free the needed space
|
||||
virtual void SetCapacity(size_t capacity) override;
|
||||
|
||||
// Set the flag to reject insertion if cache if full.
|
||||
@ -303,37 +184,14 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
|
||||
// Like Cache methods, but with an extra "hash" parameter.
|
||||
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
size_t charge, Cache::DeleterFn deleter,
|
||||
size_t charge,
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
Cache::Handle** handle,
|
||||
Cache::Priority priority) override {
|
||||
return Insert(key, hash, value, charge, deleter, nullptr, handle, priority);
|
||||
}
|
||||
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
const Cache::CacheItemHelper* helper, size_t charge,
|
||||
Cache::Handle** handle,
|
||||
Cache::Priority priority) override {
|
||||
assert(helper);
|
||||
return Insert(key, hash, value, charge, nullptr, helper, handle, priority);
|
||||
}
|
||||
// If helper_cb is null, the values of the following arguments don't matter.
|
||||
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash,
|
||||
const ShardedCache::CacheItemHelper* helper,
|
||||
const ShardedCache::CreateCallback& create_cb,
|
||||
ShardedCache::Priority priority, bool wait,
|
||||
Statistics* stats) override;
|
||||
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override {
|
||||
return Lookup(key, hash, nullptr, nullptr, Cache::Priority::LOW, true,
|
||||
nullptr);
|
||||
}
|
||||
virtual bool Release(Cache::Handle* handle, bool /*useful*/,
|
||||
bool erase_if_last_ref) override {
|
||||
return Release(handle, erase_if_last_ref);
|
||||
}
|
||||
virtual bool IsReady(Cache::Handle* /*handle*/) override;
|
||||
virtual void Wait(Cache::Handle* /*handle*/) override {}
|
||||
Cache::Priority priority) override;
|
||||
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
|
||||
virtual bool Ref(Cache::Handle* handle) override;
|
||||
virtual bool Release(Cache::Handle* handle,
|
||||
bool erase_if_last_ref = false) override;
|
||||
bool force_erase = false) override;
|
||||
virtual void Erase(const Slice& key, uint32_t hash) override;
|
||||
|
||||
// Although in some platforms the update of size_t is atomic, to make sure
|
||||
@ -343,10 +201,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
virtual size_t GetUsage() const override;
|
||||
virtual size_t GetPinnedUsage() const override;
|
||||
|
||||
virtual void ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) override;
|
||||
virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) override;
|
||||
|
||||
virtual void EraseUnRefEntries() override;
|
||||
|
||||
@ -354,33 +210,14 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
|
||||
void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri);
|
||||
|
||||
// Retrieves number of elements in LRU, for unit test purpose only.
|
||||
// Not threadsafe.
|
||||
// Retrieves number of elements in LRU, for unit test purpose only
|
||||
// not threadsafe
|
||||
size_t TEST_GetLRUSize();
|
||||
|
||||
// Retrieves high pri pool ratio
|
||||
// Retrives high pri pool ratio
|
||||
double GetHighPriPoolRatio();
|
||||
|
||||
private:
|
||||
friend class LRUCache;
|
||||
// Insert an item into the hash table and, if handle is null, insert into
|
||||
// the LRU list. Older items are evicted as necessary. If the cache is full
|
||||
// and free_handle_on_fail is true, the item is deleted and handle is set to
|
||||
// nullptr.
|
||||
Status InsertItem(LRUHandle* item, Cache::Handle** handle,
|
||||
bool free_handle_on_fail);
|
||||
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
|
||||
DeleterFn deleter, const Cache::CacheItemHelper* helper,
|
||||
Cache::Handle** handle, Cache::Priority priority);
|
||||
// Promote an item looked up from the secondary cache to the LRU cache.
|
||||
// The item may be still in the secondary cache.
|
||||
// It is only inserted into the hash table and not the LRU list, and only
|
||||
// if the cache is not at full capacity, as is the case during Insert. The
|
||||
// caller should hold a reference on the LRUHandle. When the caller releases
|
||||
// the last reference, the item is added to the LRU list.
|
||||
// The item is promoted to the high pri or low pri pool as specified by the
|
||||
// caller in Lookup.
|
||||
void Promote(LRUHandle* e);
|
||||
void LRU_Remove(LRUHandle* e);
|
||||
void LRU_Insert(LRUHandle* e);
|
||||
|
||||
@ -388,10 +225,14 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
// high-pri pool is no larger than the size specify by high_pri_pool_pct.
|
||||
void MaintainPoolSize();
|
||||
|
||||
// Just reduce the reference count by 1.
|
||||
// Return true if last reference
|
||||
bool Unref(LRUHandle* e);
|
||||
|
||||
// Free some space following strict LRU policy until enough space
|
||||
// to hold (usage_ + charge) is freed or the lru list is empty
|
||||
// This function is not thread safe - it needs to be executed while
|
||||
// holding the mutex_.
|
||||
// holding the mutex_
|
||||
void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);
|
||||
|
||||
// Initialized before use.
|
||||
@ -431,18 +272,16 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
|
||||
// ------------vvvvvvvvvvvvv-----------
|
||||
LRUHandleTable table_;
|
||||
|
||||
// Memory size for entries residing in the cache.
|
||||
// Memory size for entries residing in the cache
|
||||
size_t usage_;
|
||||
|
||||
// Memory size for entries residing only in the LRU list.
|
||||
// Memory size for entries residing only in the LRU list
|
||||
size_t lru_usage_;
|
||||
|
||||
// mutex_ protects the following state.
|
||||
// We don't count mutex_ as the cache's internal state so semantically we
|
||||
// don't mind mutex_ invoking the non-const actions.
|
||||
mutable port::Mutex mutex_;
|
||||
|
||||
std::shared_ptr<SecondaryCache> secondary_cache_;
|
||||
};
|
||||
|
||||
class LRUCache
|
||||
@ -454,36 +293,24 @@ class LRUCache
|
||||
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
double high_pri_pool_ratio,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
|
||||
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
|
||||
CacheMetadataChargePolicy metadata_charge_policy =
|
||||
kDontChargeCacheMetadata,
|
||||
const std::shared_ptr<SecondaryCache>& secondary_cache = nullptr);
|
||||
bool use_adaptive_mutex = kDefaultToAdaptiveMutex);
|
||||
virtual ~LRUCache();
|
||||
virtual const char* Name() const override { return "LRUCache"; }
|
||||
virtual CacheShard* GetShard(uint32_t shard) override;
|
||||
virtual const CacheShard* GetShard(uint32_t shard) const override;
|
||||
virtual CacheShard* GetShard(int shard) override;
|
||||
virtual const CacheShard* GetShard(int shard) const override;
|
||||
virtual void* Value(Handle* handle) override;
|
||||
virtual size_t GetCharge(Handle* handle) const override;
|
||||
virtual uint32_t GetHash(Handle* handle) const override;
|
||||
virtual DeleterFn GetDeleter(Handle* handle) const override;
|
||||
virtual void DisownData() override;
|
||||
virtual void WaitAll(std::vector<Handle*>& handles) override;
|
||||
|
||||
// Retrieves number of elements in LRU, for unit test purpose only.
|
||||
// Retrieves number of elements in LRU, for unit test purpose only
|
||||
size_t TEST_GetLRUSize();
|
||||
// Retrieves high pri pool ratio.
|
||||
// Retrives high pri pool ratio
|
||||
double GetHighPriPoolRatio();
|
||||
|
||||
private:
|
||||
LRUCacheShard* shards_ = nullptr;
|
||||
int num_shards_ = 0;
|
||||
std::shared_ptr<SecondaryCache> secondary_cache_;
|
||||
};
|
||||
|
||||
} // namespace lru_cache
|
||||
|
||||
using LRUCache = lru_cache::LRUCache;
|
||||
using LRUHandle = lru_cache::LRUHandle;
|
||||
using LRUCacheShard = lru_cache::LRUCacheShard;
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
1672
cache/lru_cache_test.cc
vendored
1672
cache/lru_cache_test.cc
vendored
File diff suppressed because it is too large
Load Diff
125
cache/sharded_cache.cc
vendored
125
cache/sharded_cache.cc
vendored
@ -7,112 +7,67 @@
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#ifndef __STDC_FORMAT_MACROS
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#endif
|
||||
|
||||
#include "cache/sharded_cache.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "util/hash.h"
|
||||
#include "util/math.h"
|
||||
#include "util/mutexlock.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
namespace {
|
||||
|
||||
inline uint32_t HashSlice(const Slice& s) {
|
||||
return Lower32of64(GetSliceNPHash64(s));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
namespace rocksdb {
|
||||
|
||||
ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
|
||||
bool strict_capacity_limit,
|
||||
std::shared_ptr<MemoryAllocator> allocator)
|
||||
: Cache(std::move(allocator)),
|
||||
shard_mask_((uint32_t{1} << num_shard_bits) - 1),
|
||||
num_shard_bits_(num_shard_bits),
|
||||
capacity_(capacity),
|
||||
strict_capacity_limit_(strict_capacity_limit),
|
||||
last_id_(1) {}
|
||||
|
||||
void ShardedCache::SetCapacity(size_t capacity) {
|
||||
uint32_t num_shards = GetNumShards();
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
|
||||
MutexLock l(&capacity_mutex_);
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
GetShard(s)->SetCapacity(per_shard);
|
||||
}
|
||||
capacity_ = capacity;
|
||||
}
|
||||
|
||||
void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
|
||||
uint32_t num_shards = GetNumShards();
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
MutexLock l(&capacity_mutex_);
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
|
||||
}
|
||||
strict_capacity_limit_ = strict_capacity_limit;
|
||||
}
|
||||
|
||||
Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter, Handle** handle,
|
||||
Priority priority) {
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
Handle** handle, Priority priority) {
|
||||
uint32_t hash = HashSlice(key);
|
||||
return GetShard(Shard(hash))
|
||||
->Insert(key, hash, value, charge, deleter, handle, priority);
|
||||
}
|
||||
|
||||
Status ShardedCache::Insert(const Slice& key, void* value,
|
||||
const CacheItemHelper* helper, size_t charge,
|
||||
Handle** handle, Priority priority) {
|
||||
uint32_t hash = HashSlice(key);
|
||||
if (!helper) {
|
||||
return Status::InvalidArgument();
|
||||
}
|
||||
return GetShard(Shard(hash))
|
||||
->Insert(key, hash, value, helper, charge, handle, priority);
|
||||
}
|
||||
|
||||
Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* /*stats*/) {
|
||||
uint32_t hash = HashSlice(key);
|
||||
return GetShard(Shard(hash))->Lookup(key, hash);
|
||||
}
|
||||
|
||||
Cache::Handle* ShardedCache::Lookup(const Slice& key,
|
||||
const CacheItemHelper* helper,
|
||||
const CreateCallback& create_cb,
|
||||
Priority priority, bool wait,
|
||||
Statistics* stats) {
|
||||
uint32_t hash = HashSlice(key);
|
||||
return GetShard(Shard(hash))
|
||||
->Lookup(key, hash, helper, create_cb, priority, wait, stats);
|
||||
}
|
||||
|
||||
bool ShardedCache::IsReady(Handle* handle) {
|
||||
uint32_t hash = GetHash(handle);
|
||||
return GetShard(Shard(hash))->IsReady(handle);
|
||||
}
|
||||
|
||||
void ShardedCache::Wait(Handle* handle) {
|
||||
uint32_t hash = GetHash(handle);
|
||||
GetShard(Shard(hash))->Wait(handle);
|
||||
}
|
||||
|
||||
bool ShardedCache::Ref(Handle* handle) {
|
||||
uint32_t hash = GetHash(handle);
|
||||
return GetShard(Shard(hash))->Ref(handle);
|
||||
}
|
||||
|
||||
bool ShardedCache::Release(Handle* handle, bool erase_if_last_ref) {
|
||||
bool ShardedCache::Release(Handle* handle, bool force_erase) {
|
||||
uint32_t hash = GetHash(handle);
|
||||
return GetShard(Shard(hash))->Release(handle, erase_if_last_ref);
|
||||
}
|
||||
|
||||
bool ShardedCache::Release(Handle* handle, bool useful,
|
||||
bool erase_if_last_ref) {
|
||||
uint32_t hash = GetHash(handle);
|
||||
return GetShard(Shard(hash))->Release(handle, useful, erase_if_last_ref);
|
||||
return GetShard(Shard(hash))->Release(handle, force_erase);
|
||||
}
|
||||
|
||||
void ShardedCache::Erase(const Slice& key) {
|
||||
@ -136,9 +91,9 @@ bool ShardedCache::HasStrictCapacityLimit() const {
|
||||
|
||||
size_t ShardedCache::GetUsage() const {
|
||||
// We will not lock the cache when getting the usage from shards.
|
||||
uint32_t num_shards = GetNumShards();
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
size_t usage = 0;
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
usage += GetShard(s)->GetUsage();
|
||||
}
|
||||
return usage;
|
||||
@ -150,42 +105,25 @@ size_t ShardedCache::GetUsage(Handle* handle) const {
|
||||
|
||||
size_t ShardedCache::GetPinnedUsage() const {
|
||||
// We will not lock the cache when getting the usage from shards.
|
||||
uint32_t num_shards = GetNumShards();
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
size_t usage = 0;
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
usage += GetShard(s)->GetPinnedUsage();
|
||||
}
|
||||
return usage;
|
||||
}
|
||||
|
||||
void ShardedCache::ApplyToAllEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
const ApplyToAllEntriesOptions& opts) {
|
||||
uint32_t num_shards = GetNumShards();
|
||||
// Iterate over part of each shard, rotating between shards, to
|
||||
// minimize impact on latency of concurrent operations.
|
||||
std::unique_ptr<uint32_t[]> states(new uint32_t[num_shards]{});
|
||||
|
||||
uint32_t aepl_in_32 = static_cast<uint32_t>(
|
||||
std::min(size_t{UINT32_MAX}, opts.average_entries_per_lock));
|
||||
aepl_in_32 = std::min(aepl_in_32, uint32_t{1});
|
||||
|
||||
bool remaining_work;
|
||||
do {
|
||||
remaining_work = false;
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
if (states[s] != UINT32_MAX) {
|
||||
GetShard(s)->ApplyToSomeEntries(callback, aepl_in_32, &states[s]);
|
||||
remaining_work |= states[s] != UINT32_MAX;
|
||||
}
|
||||
}
|
||||
} while (remaining_work);
|
||||
void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) {
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
GetShard(s)->ApplyToAllCacheEntries(callback, thread_safe);
|
||||
}
|
||||
}
|
||||
|
||||
void ShardedCache::EraseUnRefEntries() {
|
||||
uint32_t num_shards = GetNumShards();
|
||||
for (uint32_t s = 0; s < num_shards; s++) {
|
||||
int num_shards = 1 << num_shard_bits_;
|
||||
for (int s = 0; s < num_shards; s++) {
|
||||
GetShard(s)->EraseUnRefEntries();
|
||||
}
|
||||
}
|
||||
@ -200,8 +138,7 @@ std::string ShardedCache::GetPrintableOptions() const {
|
||||
snprintf(buffer, kBufferSize, " capacity : %" ROCKSDB_PRIszt "\n",
|
||||
capacity_);
|
||||
ret.append(buffer);
|
||||
snprintf(buffer, kBufferSize, " num_shard_bits : %d\n",
|
||||
GetNumShardBits());
|
||||
snprintf(buffer, kBufferSize, " num_shard_bits : %d\n", num_shard_bits_);
|
||||
ret.append(buffer);
|
||||
snprintf(buffer, kBufferSize, " strict_capacity_limit : %d\n",
|
||||
strict_capacity_limit_);
|
||||
@ -226,8 +163,4 @@ int GetDefaultCacheShardBits(size_t capacity) {
|
||||
return num_shard_bits;
|
||||
}
|
||||
|
||||
int ShardedCache::GetNumShardBits() const { return BitsSetToOne(shard_mask_); }
|
||||
|
||||
uint32_t ShardedCache::GetNumShards() const { return shard_mask_ + 1; }
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
89
cache/sharded_cache.h
vendored
89
cache/sharded_cache.h
vendored
@ -14,8 +14,9 @@
|
||||
|
||||
#include "port/port.h"
|
||||
#include "rocksdb/cache.h"
|
||||
#include "util/hash.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
namespace rocksdb {
|
||||
|
||||
// Single cache shard interface.
|
||||
class CacheShard {
|
||||
@ -23,47 +24,22 @@ class CacheShard {
|
||||
CacheShard() = default;
|
||||
virtual ~CacheShard() = default;
|
||||
|
||||
using DeleterFn = Cache::DeleterFn;
|
||||
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
size_t charge, DeleterFn deleter,
|
||||
Cache::Handle** handle, Cache::Priority priority) = 0;
|
||||
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
|
||||
const Cache::CacheItemHelper* helper, size_t charge,
|
||||
size_t charge,
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
Cache::Handle** handle, Cache::Priority priority) = 0;
|
||||
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
|
||||
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash,
|
||||
const Cache::CacheItemHelper* helper,
|
||||
const Cache::CreateCallback& create_cb,
|
||||
Cache::Priority priority, bool wait,
|
||||
Statistics* stats) = 0;
|
||||
virtual bool Release(Cache::Handle* handle, bool useful,
|
||||
bool erase_if_last_ref) = 0;
|
||||
virtual bool IsReady(Cache::Handle* handle) = 0;
|
||||
virtual void Wait(Cache::Handle* handle) = 0;
|
||||
virtual bool Ref(Cache::Handle* handle) = 0;
|
||||
virtual bool Release(Cache::Handle* handle, bool erase_if_last_ref) = 0;
|
||||
virtual bool Release(Cache::Handle* handle, bool force_erase = false) = 0;
|
||||
virtual void Erase(const Slice& key, uint32_t hash) = 0;
|
||||
virtual void SetCapacity(size_t capacity) = 0;
|
||||
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
|
||||
virtual size_t GetUsage() const = 0;
|
||||
virtual size_t GetPinnedUsage() const = 0;
|
||||
// Handles iterating over roughly `average_entries_per_lock` entries, using
|
||||
// `state` to somehow record where it last ended up. Caller initially uses
|
||||
// *state == 0 and implementation sets *state = UINT32_MAX to indicate
|
||||
// completion.
|
||||
virtual void ApplyToSomeEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
uint32_t average_entries_per_lock, uint32_t* state) = 0;
|
||||
virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) = 0;
|
||||
virtual void EraseUnRefEntries() = 0;
|
||||
virtual std::string GetPrintableOptions() const { return ""; }
|
||||
void set_metadata_charge_policy(
|
||||
CacheMetadataChargePolicy metadata_charge_policy) {
|
||||
metadata_charge_policy_ = metadata_charge_policy;
|
||||
}
|
||||
|
||||
protected:
|
||||
CacheMetadataChargePolicy metadata_charge_policy_ = kDontChargeCacheMetadata;
|
||||
};
|
||||
|
||||
// Generic cache interface which shards cache by hash of keys. 2^num_shard_bits
|
||||
@ -74,31 +50,23 @@ class ShardedCache : public Cache {
|
||||
ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
||||
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr);
|
||||
virtual ~ShardedCache() = default;
|
||||
virtual CacheShard* GetShard(uint32_t shard) = 0;
|
||||
virtual const CacheShard* GetShard(uint32_t shard) const = 0;
|
||||
|
||||
virtual const char* Name() const override = 0;
|
||||
virtual CacheShard* GetShard(int shard) = 0;
|
||||
virtual const CacheShard* GetShard(int shard) const = 0;
|
||||
virtual void* Value(Handle* handle) override = 0;
|
||||
virtual size_t GetCharge(Handle* handle) const = 0;
|
||||
virtual uint32_t GetHash(Handle* handle) const = 0;
|
||||
virtual void DisownData() override = 0;
|
||||
|
||||
virtual void SetCapacity(size_t capacity) override;
|
||||
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
|
||||
|
||||
virtual Status Insert(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter, Handle** handle,
|
||||
Priority priority) override;
|
||||
virtual Status Insert(const Slice& key, void* value,
|
||||
const CacheItemHelper* helper, size_t chargge,
|
||||
Handle** handle = nullptr,
|
||||
Priority priority = Priority::LOW) override;
|
||||
void (*deleter)(const Slice& key, void* value),
|
||||
Handle** handle, Priority priority) override;
|
||||
virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
|
||||
virtual Handle* Lookup(const Slice& key, const CacheItemHelper* helper,
|
||||
const CreateCallback& create_cb, Priority priority,
|
||||
bool wait, Statistics* stats = nullptr) override;
|
||||
virtual bool Release(Handle* handle, bool useful,
|
||||
bool erase_if_last_ref = false) override;
|
||||
virtual bool IsReady(Handle* handle) override;
|
||||
virtual void Wait(Handle* handle) override;
|
||||
virtual bool Ref(Handle* handle) override;
|
||||
virtual bool Release(Handle* handle, bool erase_if_last_ref = false) override;
|
||||
virtual bool Release(Handle* handle, bool force_erase = false) override;
|
||||
virtual void Erase(const Slice& key) override;
|
||||
virtual uint64_t NewId() override;
|
||||
virtual size_t GetCapacity() const override;
|
||||
@ -106,21 +74,24 @@ class ShardedCache : public Cache {
|
||||
virtual size_t GetUsage() const override;
|
||||
virtual size_t GetUsage(Handle* handle) const override;
|
||||
virtual size_t GetPinnedUsage() const override;
|
||||
virtual void ApplyToAllEntries(
|
||||
const std::function<void(const Slice& key, void* value, size_t charge,
|
||||
DeleterFn deleter)>& callback,
|
||||
const ApplyToAllEntriesOptions& opts) override;
|
||||
virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
|
||||
bool thread_safe) override;
|
||||
virtual void EraseUnRefEntries() override;
|
||||
virtual std::string GetPrintableOptions() const override;
|
||||
|
||||
int GetNumShardBits() const;
|
||||
uint32_t GetNumShards() const;
|
||||
|
||||
protected:
|
||||
inline uint32_t Shard(uint32_t hash) { return hash & shard_mask_; }
|
||||
int GetNumShardBits() const { return num_shard_bits_; }
|
||||
|
||||
private:
|
||||
const uint32_t shard_mask_;
|
||||
static inline uint32_t HashSlice(const Slice& s) {
|
||||
return static_cast<uint32_t>(GetSliceNPHash64(s));
|
||||
}
|
||||
|
||||
uint32_t Shard(uint32_t hash) {
|
||||
// Note, hash >> 32 yields hash in gcc, not the zero we expect!
|
||||
return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
|
||||
}
|
||||
|
||||
int num_shard_bits_;
|
||||
mutable port::Mutex capacity_mutex_;
|
||||
size_t capacity_;
|
||||
bool strict_capacity_limit_;
|
||||
@ -129,4 +100,4 @@ class ShardedCache : public Cache {
|
||||
|
||||
extern int GetDefaultCacheShardBits(size_t capacity);
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
} // namespace rocksdb
|
||||
|
@ -1,54 +1,3 @@
|
||||
@PACKAGE_INIT@
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules")
|
||||
|
||||
include(CMakeFindDependencyMacro)
|
||||
|
||||
set(GFLAGS_USE_TARGET_NAMESPACE @GFLAGS_USE_TARGET_NAMESPACE@)
|
||||
|
||||
if(@WITH_JEMALLOC@)
|
||||
find_dependency(JeMalloc)
|
||||
endif()
|
||||
|
||||
if(@WITH_GFLAGS@)
|
||||
find_dependency(gflags CONFIG)
|
||||
if(NOT gflags_FOUND)
|
||||
find_dependency(gflags)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(@WITH_SNAPPY@)
|
||||
find_dependency(Snappy CONFIG)
|
||||
if(NOT Snappy_FOUND)
|
||||
find_dependency(Snappy)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(@WITH_ZLIB@)
|
||||
find_dependency(ZLIB)
|
||||
endif()
|
||||
|
||||
if(@WITH_BZ2@)
|
||||
find_dependency(BZip2)
|
||||
endif()
|
||||
|
||||
if(@WITH_LZ4@)
|
||||
find_dependency(lz4)
|
||||
endif()
|
||||
|
||||
if(@WITH_ZSTD@)
|
||||
find_dependency(zstd)
|
||||
endif()
|
||||
|
||||
if(@WITH_NUMA@)
|
||||
find_dependency(NUMA)
|
||||
endif()
|
||||
|
||||
if(@WITH_TBB@)
|
||||
find_dependency(TBB)
|
||||
endif()
|
||||
|
||||
find_dependency(Threads)
|
||||
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/RocksDBTargets.cmake")
|
||||
check_required_components(RocksDB)
|
||||
|
@ -1,7 +0,0 @@
|
||||
macro(get_cxx_std_flags FLAGS_VARIABLE)
|
||||
if( CMAKE_CXX_STANDARD_REQUIRED )
|
||||
set(${FLAGS_VARIABLE} ${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION})
|
||||
else()
|
||||
set(${FLAGS_VARIABLE} ${CMAKE_CXX${CMAKE_CXX_STANDARD}_EXTENSION_COMPILE_OPTION})
|
||||
endif()
|
||||
endmacro()
|
@ -1,29 +1,21 @@
|
||||
# - Find JeMalloc library
|
||||
# Find the native JeMalloc includes and library
|
||||
#
|
||||
# JeMalloc_INCLUDE_DIRS - where to find jemalloc.h, etc.
|
||||
# JeMalloc_LIBRARIES - List of libraries when using jemalloc.
|
||||
# JeMalloc_FOUND - True if jemalloc found.
|
||||
# JEMALLOC_INCLUDE_DIR - where to find jemalloc.h, etc.
|
||||
# JEMALLOC_LIBRARIES - List of libraries when using jemalloc.
|
||||
# JEMALLOC_FOUND - True if jemalloc found.
|
||||
|
||||
find_path(JeMalloc_INCLUDE_DIRS
|
||||
find_path(JEMALLOC_INCLUDE_DIR
|
||||
NAMES jemalloc/jemalloc.h
|
||||
HINTS ${JEMALLOC_ROOT_DIR}/include)
|
||||
|
||||
find_library(JeMalloc_LIBRARIES
|
||||
find_library(JEMALLOC_LIBRARIES
|
||||
NAMES jemalloc
|
||||
HINTS ${JEMALLOC_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(JeMalloc DEFAULT_MSG JeMalloc_LIBRARIES JeMalloc_INCLUDE_DIRS)
|
||||
find_package_handle_standard_args(jemalloc DEFAULT_MSG JEMALLOC_LIBRARIES JEMALLOC_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
JeMalloc_LIBRARIES
|
||||
JeMalloc_INCLUDE_DIRS)
|
||||
|
||||
if(JeMalloc_FOUND AND NOT (TARGET JeMalloc::JeMalloc))
|
||||
add_library (JeMalloc::JeMalloc UNKNOWN IMPORTED)
|
||||
set_target_properties(JeMalloc::JeMalloc
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${JeMalloc_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${JeMalloc_INCLUDE_DIRS})
|
||||
endif()
|
||||
JEMALLOC_LIBRARIES
|
||||
JEMALLOC_INCLUDE_DIR)
|
||||
|
@ -1,11 +1,11 @@
|
||||
# - Find NUMA
|
||||
# Find the NUMA library and includes
|
||||
#
|
||||
# NUMA_INCLUDE_DIRS - where to find numa.h, etc.
|
||||
# NUMA_INCLUDE_DIR - where to find numa.h, etc.
|
||||
# NUMA_LIBRARIES - List of libraries when using NUMA.
|
||||
# NUMA_FOUND - True if NUMA found.
|
||||
|
||||
find_path(NUMA_INCLUDE_DIRS
|
||||
find_path(NUMA_INCLUDE_DIR
|
||||
NAMES numa.h numaif.h
|
||||
HINTS ${NUMA_ROOT_DIR}/include)
|
||||
|
||||
@ -14,16 +14,8 @@ find_library(NUMA_LIBRARIES
|
||||
HINTS ${NUMA_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(NUMA DEFAULT_MSG NUMA_LIBRARIES NUMA_INCLUDE_DIRS)
|
||||
find_package_handle_standard_args(NUMA DEFAULT_MSG NUMA_LIBRARIES NUMA_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
NUMA_LIBRARIES
|
||||
NUMA_INCLUDE_DIRS)
|
||||
|
||||
if(NUMA_FOUND AND NOT (TARGET NUMA::NUMA))
|
||||
add_library (NUMA::NUMA UNKNOWN IMPORTED)
|
||||
set_target_properties(NUMA::NUMA
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${NUMA_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${NUMA_INCLUDE_DIRS})
|
||||
endif()
|
||||
NUMA_INCLUDE_DIR)
|
||||
|
@ -1,29 +0,0 @@
|
||||
# - Find Snappy
|
||||
# Find the snappy compression library and includes
|
||||
#
|
||||
# Snappy_INCLUDE_DIRS - where to find snappy.h, etc.
|
||||
# Snappy_LIBRARIES - List of libraries when using snappy.
|
||||
# Snappy_FOUND - True if snappy found.
|
||||
|
||||
find_path(Snappy_INCLUDE_DIRS
|
||||
NAMES snappy.h
|
||||
HINTS ${snappy_ROOT_DIR}/include)
|
||||
|
||||
find_library(Snappy_LIBRARIES
|
||||
NAMES snappy
|
||||
HINTS ${snappy_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_LIBRARIES Snappy_INCLUDE_DIRS)
|
||||
|
||||
mark_as_advanced(
|
||||
Snappy_LIBRARIES
|
||||
Snappy_INCLUDE_DIRS)
|
||||
|
||||
if(Snappy_FOUND AND NOT (TARGET Snappy::snappy))
|
||||
add_library (Snappy::snappy UNKNOWN IMPORTED)
|
||||
set_target_properties(Snappy::snappy
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${Snappy_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${Snappy_INCLUDE_DIRS})
|
||||
endif()
|
@ -1,7 +1,7 @@
|
||||
# - Find TBB
|
||||
# Find the Thread Building Blocks library and includes
|
||||
#
|
||||
# TBB_INCLUDE_DIRS - where to find tbb.h, etc.
|
||||
# TBB_INCLUDE_DIR - where to find tbb.h, etc.
|
||||
# TBB_LIBRARIES - List of libraries when using TBB.
|
||||
# TBB_FOUND - True if TBB found.
|
||||
|
||||
@ -9,25 +9,17 @@ if(NOT DEFINED TBB_ROOT_DIR)
|
||||
set(TBB_ROOT_DIR "$ENV{TBBROOT}")
|
||||
endif()
|
||||
|
||||
find_path(TBB_INCLUDE_DIRS
|
||||
NAMES tbb/tbb.h
|
||||
HINTS ${TBB_ROOT_DIR}/include)
|
||||
find_path(TBB_INCLUDE_DIR
|
||||
NAMES tbb/tbb.h
|
||||
HINTS ${TBB_ROOT_DIR}/include)
|
||||
|
||||
find_library(TBB_LIBRARIES
|
||||
NAMES tbb
|
||||
HINTS ${TBB_ROOT_DIR}/lib ENV LIBRARY_PATH)
|
||||
NAMES tbb
|
||||
HINTS ${TBB_ROOT_DIR}/lib ENV LIBRARY_PATH)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(TBB DEFAULT_MSG TBB_LIBRARIES TBB_INCLUDE_DIRS)
|
||||
find_package_handle_standard_args(TBB DEFAULT_MSG TBB_LIBRARIES TBB_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
TBB_LIBRARIES
|
||||
TBB_INCLUDE_DIRS)
|
||||
|
||||
if(TBB_FOUND AND NOT (TARGET TBB::TBB))
|
||||
add_library (TBB::TBB UNKNOWN IMPORTED)
|
||||
set_target_properties(TBB::TBB
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${TBB_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS})
|
||||
endif()
|
||||
TBB_LIBRARIES
|
||||
TBB_INCLUDE_DIR)
|
||||
|
21
cmake/modules/Findbzip2.cmake
Normal file
21
cmake/modules/Findbzip2.cmake
Normal file
@ -0,0 +1,21 @@
|
||||
# - Find Bzip2
|
||||
# Find the bzip2 compression library and includes
|
||||
#
|
||||
# BZIP2_INCLUDE_DIR - where to find bzlib.h, etc.
|
||||
# BZIP2_LIBRARIES - List of libraries when using bzip2.
|
||||
# BZIP2_FOUND - True if bzip2 found.
|
||||
|
||||
find_path(BZIP2_INCLUDE_DIR
|
||||
NAMES bzlib.h
|
||||
HINTS ${BZIP2_ROOT_DIR}/include)
|
||||
|
||||
find_library(BZIP2_LIBRARIES
|
||||
NAMES bz2
|
||||
HINTS ${BZIP2_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(bzip2 DEFAULT_MSG BZIP2_LIBRARIES BZIP2_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
BZIP2_LIBRARIES
|
||||
BZIP2_INCLUDE_DIR)
|
@ -1,29 +0,0 @@
|
||||
# - Find gflags library
|
||||
# Find the gflags includes and library
|
||||
#
|
||||
# GFLAGS_INCLUDE_DIR - where to find gflags.h.
|
||||
# GFLAGS_LIBRARIES - List of libraries when using gflags.
|
||||
# gflags_FOUND - True if gflags found.
|
||||
|
||||
find_path(GFLAGS_INCLUDE_DIR
|
||||
NAMES gflags/gflags.h)
|
||||
|
||||
find_library(GFLAGS_LIBRARIES
|
||||
NAMES gflags)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(gflags
|
||||
DEFAULT_MSG GFLAGS_LIBRARIES GFLAGS_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
GFLAGS_LIBRARIES
|
||||
GFLAGS_INCLUDE_DIR)
|
||||
|
||||
if(gflags_FOUND AND NOT (TARGET gflags::gflags))
|
||||
add_library(gflags::gflags UNKNOWN IMPORTED)
|
||||
set_target_properties(gflags::gflags
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${GFLAGS_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${GFLAGS_INCLUDE_DIR}
|
||||
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX")
|
||||
endif()
|
@ -1,29 +1,21 @@
|
||||
# - Find Lz4
|
||||
# Find the lz4 compression library and includes
|
||||
#
|
||||
# lz4_INCLUDE_DIRS - where to find lz4.h, etc.
|
||||
# lz4_LIBRARIES - List of libraries when using lz4.
|
||||
# lz4_FOUND - True if lz4 found.
|
||||
# LZ4_INCLUDE_DIR - where to find lz4.h, etc.
|
||||
# LZ4_LIBRARIES - List of libraries when using lz4.
|
||||
# LZ4_FOUND - True if lz4 found.
|
||||
|
||||
find_path(lz4_INCLUDE_DIRS
|
||||
find_path(LZ4_INCLUDE_DIR
|
||||
NAMES lz4.h
|
||||
HINTS ${lz4_ROOT_DIR}/include)
|
||||
HINTS ${LZ4_ROOT_DIR}/include)
|
||||
|
||||
find_library(lz4_LIBRARIES
|
||||
find_library(LZ4_LIBRARIES
|
||||
NAMES lz4
|
||||
HINTS ${lz4_ROOT_DIR}/lib)
|
||||
HINTS ${LZ4_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(lz4 DEFAULT_MSG lz4_LIBRARIES lz4_INCLUDE_DIRS)
|
||||
find_package_handle_standard_args(lz4 DEFAULT_MSG LZ4_LIBRARIES LZ4_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
lz4_LIBRARIES
|
||||
lz4_INCLUDE_DIRS)
|
||||
|
||||
if(lz4_FOUND AND NOT (TARGET lz4::lz4))
|
||||
add_library(lz4::lz4 UNKNOWN IMPORTED)
|
||||
set_target_properties(lz4::lz4
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${lz4_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${lz4_INCLUDE_DIRS})
|
||||
endif()
|
||||
LZ4_LIBRARIES
|
||||
LZ4_INCLUDE_DIR)
|
||||
|
21
cmake/modules/Findsnappy.cmake
Normal file
21
cmake/modules/Findsnappy.cmake
Normal file
@ -0,0 +1,21 @@
|
||||
# - Find Snappy
|
||||
# Find the snappy compression library and includes
|
||||
#
|
||||
# SNAPPY_INCLUDE_DIR - where to find snappy.h, etc.
|
||||
# SNAPPY_LIBRARIES - List of libraries when using snappy.
|
||||
# SNAPPY_FOUND - True if snappy found.
|
||||
|
||||
find_path(SNAPPY_INCLUDE_DIR
|
||||
NAMES snappy.h
|
||||
HINTS ${SNAPPY_ROOT_DIR}/include)
|
||||
|
||||
find_library(SNAPPY_LIBRARIES
|
||||
NAMES snappy
|
||||
HINTS ${SNAPPY_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(snappy DEFAULT_MSG SNAPPY_LIBRARIES SNAPPY_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
SNAPPY_LIBRARIES
|
||||
SNAPPY_INCLUDE_DIR)
|
@ -1,26 +0,0 @@
|
||||
# - Find liburing
|
||||
#
|
||||
# uring_INCLUDE_DIR - Where to find liburing.h
|
||||
# uring_LIBRARIES - List of libraries when using uring.
|
||||
# uring_FOUND - True if uring found.
|
||||
|
||||
find_path(uring_INCLUDE_DIR
|
||||
NAMES liburing.h)
|
||||
find_library(uring_LIBRARIES
|
||||
NAMES liburing.a liburing)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(uring
|
||||
DEFAULT_MSG uring_LIBRARIES uring_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
uring_INCLUDE_DIR
|
||||
uring_LIBRARIES)
|
||||
|
||||
if(uring_FOUND AND NOT TARGET uring::uring)
|
||||
add_library(uring::uring UNKNOWN IMPORTED)
|
||||
set_target_properties(uring::uring PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${uring_INCLUDE_DIR}"
|
||||
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
|
||||
IMPORTED_LOCATION "${uring_LIBRARIES}")
|
||||
endif()
|
@ -1,29 +1,21 @@
|
||||
# - Find zstd
|
||||
# Find the zstd compression library and includes
|
||||
#
|
||||
# zstd_INCLUDE_DIRS - where to find zstd.h, etc.
|
||||
# zstd_LIBRARIES - List of libraries when using zstd.
|
||||
# zstd_FOUND - True if zstd found.
|
||||
# ZSTD_INCLUDE_DIR - where to find zstd.h, etc.
|
||||
# ZSTD_LIBRARIES - List of libraries when using zstd.
|
||||
# ZSTD_FOUND - True if zstd found.
|
||||
|
||||
find_path(zstd_INCLUDE_DIRS
|
||||
find_path(ZSTD_INCLUDE_DIR
|
||||
NAMES zstd.h
|
||||
HINTS ${zstd_ROOT_DIR}/include)
|
||||
HINTS ${ZSTD_ROOT_DIR}/include)
|
||||
|
||||
find_library(zstd_LIBRARIES
|
||||
find_library(ZSTD_LIBRARIES
|
||||
NAMES zstd
|
||||
HINTS ${zstd_ROOT_DIR}/lib)
|
||||
HINTS ${ZSTD_ROOT_DIR}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(zstd DEFAULT_MSG zstd_LIBRARIES zstd_INCLUDE_DIRS)
|
||||
find_package_handle_standard_args(zstd DEFAULT_MSG ZSTD_LIBRARIES ZSTD_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
zstd_LIBRARIES
|
||||
zstd_INCLUDE_DIRS)
|
||||
|
||||
if(zstd_FOUND AND NOT (TARGET zstd::zstd))
|
||||
add_library (zstd::zstd UNKNOWN IMPORTED)
|
||||
set_target_properties(zstd::zstd
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${zstd_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${zstd_INCLUDE_DIRS})
|
||||
endif()
|
||||
ZSTD_LIBRARIES
|
||||
ZSTD_INCLUDE_DIR)
|
||||
|
@ -1,10 +0,0 @@
|
||||
# Read rocksdb version from version.h header file.
|
||||
|
||||
function(get_rocksdb_version version_var)
|
||||
file(READ "${CMAKE_CURRENT_SOURCE_DIR}/include/rocksdb/version.h" version_header_file)
|
||||
foreach(component MAJOR MINOR PATCH)
|
||||
string(REGEX MATCH "#define ROCKSDB_${component} ([0-9]+)" _ ${version_header_file})
|
||||
set(ROCKSDB_VERSION_${component} ${CMAKE_MATCH_1})
|
||||
endforeach()
|
||||
set(${version_var} "${ROCKSDB_VERSION_MAJOR}.${ROCKSDB_VERSION_MINOR}.${ROCKSDB_VERSION_PATCH}" PARENT_SCOPE)
|
||||
endfunction()
|
30
common.mk
30
common.mk
@ -1,30 +0,0 @@
|
||||
ifndef PYTHON
|
||||
|
||||
# Default to python3. Some distros like CentOS 8 do not have `python`.
|
||||
ifeq ($(origin PYTHON), undefined)
|
||||
PYTHON := $(shell which python3 || which python || echo python3)
|
||||
endif
|
||||
export PYTHON
|
||||
|
||||
endif
|
||||
|
||||
# To setup tmp directory, first recognize some old variables for setting
|
||||
# test tmp directory or base tmp directory. TEST_TMPDIR is usually read
|
||||
# by RocksDB tools though Env/FileSystem::GetTestDirectory.
|
||||
ifeq ($(TEST_TMPDIR),)
|
||||
TEST_TMPDIR := $(TMPD)
|
||||
endif
|
||||
ifeq ($(TEST_TMPDIR),)
|
||||
ifeq ($(BASE_TMPDIR),)
|
||||
BASE_TMPDIR :=$(TMPDIR)
|
||||
endif
|
||||
ifeq ($(BASE_TMPDIR),)
|
||||
BASE_TMPDIR :=/tmp
|
||||
endif
|
||||
# Use /dev/shm if it has the sticky bit set (otherwise, /tmp or other
|
||||
# base dir), and create a randomly-named rocksdb.XXXX directory therein.
|
||||
TEST_TMPDIR := $(shell f=/dev/shm; test -k $$f || f=$(BASE_TMPDIR); \
|
||||
perl -le 'use File::Temp "tempdir";' \
|
||||
-e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)')
|
||||
endif
|
||||
export TEST_TMPDIR
|
@ -12,24 +12,21 @@ fi
|
||||
ROOT=".."
|
||||
# Fetch right version of gcov
|
||||
if [ -d /mnt/gvfs/third-party -a -z "$CXX" ]; then
|
||||
source $ROOT/build_tools/fbcode_config_platform009.sh
|
||||
source $ROOT/build_tools/fbcode_config.sh
|
||||
GCOV=$GCC_BASE/bin/gcov
|
||||
else
|
||||
GCOV=$(which gcov)
|
||||
fi
|
||||
echo -e "Using $GCOV"
|
||||
|
||||
COVERAGE_DIR="$PWD/COVERAGE_REPORT"
|
||||
mkdir -p $COVERAGE_DIR
|
||||
|
||||
# Find all gcno files to generate the coverage report
|
||||
|
||||
PYTHON=${1:-`which python3`}
|
||||
echo -e "Using $PYTHON"
|
||||
GCNO_FILES=`find $ROOT -name "*.gcno"`
|
||||
$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
|
||||
# Parse the raw gcov report to more human readable form.
|
||||
$PYTHON $ROOT/coverage/parse_gcov_output.py |
|
||||
python $ROOT/coverage/parse_gcov_output.py |
|
||||
# Write the output to both stdout and report file.
|
||||
tee $COVERAGE_DIR/coverage_report_all.txt &&
|
||||
echo -e "Generated coverage report for all files: $COVERAGE_DIR/coverage_report_all.txt\n"
|
||||
@ -44,7 +41,7 @@ RECENT_REPORT=$COVERAGE_DIR/coverage_report_recent.txt
|
||||
|
||||
echo -e "Recently updated files: $LATEST_FILES\n" > $RECENT_REPORT
|
||||
$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
|
||||
$PYTHON $ROOT/coverage/parse_gcov_output.py -interested-files $LATEST_FILES |
|
||||
python $ROOT/coverage/parse_gcov_output.py -interested-files $LATEST_FILES |
|
||||
tee -a $RECENT_REPORT &&
|
||||
echo -e "Generated coverage report for recently updated files: $RECENT_REPORT\n"
|
||||
|
||||
|
@ -1,12 +1,9 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import optparse
|
||||
import re
|
||||
import sys
|
||||
|
||||
from optparse import OptionParser
|
||||
|
||||
# the gcov report follows certain pattern. Each file will have two lines
|
||||
# of report, from which we can extract the file name, total lines and coverage
|
||||
# percentage.
|
||||
@ -50,7 +47,7 @@ def parse_gcov_report(gcov_input):
|
||||
def get_option_parser():
|
||||
usage = "Parse the gcov output and generate more human-readable code " +\
|
||||
"coverage report."
|
||||
parser = optparse.OptionParser(usage)
|
||||
parser = OptionParser(usage)
|
||||
|
||||
parser.add_option(
|
||||
"--interested-files", "-i",
|
||||
@ -75,8 +72,8 @@ def display_file_coverage(per_file_coverage, total_coverage):
|
||||
header_template = \
|
||||
"%" + str(max_file_name_length) + "s\t%s\t%s"
|
||||
separator = "-" * (max_file_name_length + 10 + 20)
|
||||
print(header_template % ("Filename", "Coverage", "Lines")) # noqa: E999 T25377293 Grandfathered in
|
||||
print(separator)
|
||||
print header_template % ("Filename", "Coverage", "Lines") # noqa: E999 T25377293 Grandfathered in
|
||||
print separator
|
||||
|
||||
# -- Print body
|
||||
# template for printing coverage report for each file.
|
||||
@ -84,12 +81,12 @@ def display_file_coverage(per_file_coverage, total_coverage):
|
||||
|
||||
for fname, coverage_info in per_file_coverage.items():
|
||||
coverage, lines = coverage_info
|
||||
print(record_template % (fname, coverage, lines))
|
||||
print record_template % (fname, coverage, lines)
|
||||
|
||||
# -- Print footer
|
||||
if total_coverage:
|
||||
print(separator)
|
||||
print(record_template % ("Total", total_coverage[0], total_coverage[1]))
|
||||
print separator
|
||||
print record_template % ("Total", total_coverage[0], total_coverage[1])
|
||||
|
||||
def report_coverage():
|
||||
parser = get_option_parser()
|
||||
@ -113,7 +110,7 @@ def report_coverage():
|
||||
total_coverage = None
|
||||
|
||||
if not len(per_file_coverage):
|
||||
print("Cannot find coverage info for the given files.", file=sys.stderr)
|
||||
print >> sys.stderr, "Cannot find coverage info for the given files."
|
||||
return
|
||||
display_file_coverage(per_file_coverage, total_coverage)
|
||||
|
||||
|
@ -1,93 +0,0 @@
|
||||
# This file is used by Meta-internal infrastructure as well as by Makefile
|
||||
|
||||
# When included from Makefile, there are rules to build DB_STRESS_CMD. When
|
||||
# used directly with `make -f crashtest.mk ...` there will be no rules to
|
||||
# build DB_STRESS_CMD so it must exist prior.
|
||||
DB_STRESS_CMD?=./db_stress
|
||||
|
||||
include common.mk
|
||||
|
||||
CRASHTEST_MAKE=$(MAKE) -f crash_test.mk
|
||||
CRASHTEST_PY=$(PYTHON) -u tools/db_crashtest.py --stress_cmd=$(DB_STRESS_CMD)
|
||||
|
||||
.PHONY: crash_test crash_test_with_atomic_flush crash_test_with_txn \
|
||||
crash_test_with_best_efforts_recovery crash_test_with_ts \
|
||||
blackbox_crash_test blackbox_crash_test_with_atomic_flush \
|
||||
blackbox_crash_test_with_txn blackbox_crash_test_with_ts \
|
||||
blackbox_crash_test_with_best_efforts_recovery \
|
||||
whitebox_crash_test whitebox_crash_test_with_atomic_flush \
|
||||
whitebox_crash_test_with_txn whitebox_crash_test_with_ts \
|
||||
blackbox_crash_test_with_multiops_wc_txn \
|
||||
blackbox_crash_test_with_multiops_wp_txn
|
||||
|
||||
crash_test: $(DB_STRESS_CMD)
|
||||
# Do not parallelize
|
||||
$(CRASHTEST_MAKE) whitebox_crash_test
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test
|
||||
|
||||
crash_test_with_atomic_flush: $(DB_STRESS_CMD)
|
||||
# Do not parallelize
|
||||
$(CRASHTEST_MAKE) whitebox_crash_test_with_atomic_flush
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test_with_atomic_flush
|
||||
|
||||
crash_test_with_txn: $(DB_STRESS_CMD)
|
||||
# Do not parallelize
|
||||
$(CRASHTEST_MAKE) whitebox_crash_test_with_txn
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test_with_txn
|
||||
|
||||
crash_test_with_best_efforts_recovery: blackbox_crash_test_with_best_efforts_recovery
|
||||
|
||||
crash_test_with_ts: $(DB_STRESS_CMD)
|
||||
# Do not parallelize
|
||||
$(CRASHTEST_MAKE) whitebox_crash_test_with_ts
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test_with_ts
|
||||
|
||||
crash_test_with_multiops_wc_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test_with_multiops_wc_txn
|
||||
|
||||
crash_test_with_multiops_wp_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_MAKE) blackbox_crash_test_with_multiops_wp_txn
|
||||
|
||||
blackbox_crash_test: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --simple blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
$(CRASHTEST_PY) blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_atomic_flush: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --cf_consistency blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --txn blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_best_efforts_recovery: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --test_best_efforts_recovery blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_ts: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --enable_ts blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_multiops_wc_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --test_multiops_txn --write_policy write_committed blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
blackbox_crash_test_with_multiops_wp_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --test_multiops_txn --write_policy write_prepared blackbox $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
ifeq ($(CRASH_TEST_KILL_ODD),)
|
||||
CRASH_TEST_KILL_ODD=888887
|
||||
endif
|
||||
|
||||
whitebox_crash_test: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --simple whitebox --random_kill_odd \
|
||||
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
|
||||
$(CRASHTEST_PY) whitebox --random_kill_odd \
|
||||
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
whitebox_crash_test_with_atomic_flush: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --cf_consistency whitebox --random_kill_odd \
|
||||
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
whitebox_crash_test_with_txn: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --txn whitebox --random_kill_odd \
|
||||
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
|
||||
|
||||
whitebox_crash_test_with_ts: $(DB_STRESS_CMD)
|
||||
$(CRASHTEST_PY) --enable_ts whitebox --random_kill_odd \
|
||||
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
|
@ -1,131 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include "db/arena_wrapped_db_iter.h"
|
||||
#include "memory/arena.h"
|
||||
#include "rocksdb/env.h"
|
||||
#include "rocksdb/iterator.h"
|
||||
#include "rocksdb/options.h"
|
||||
#include "table/internal_iterator.h"
|
||||
#include "table/iterator_wrapper.h"
|
||||
#include "util/user_comparator_wrapper.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
Status ArenaWrappedDBIter::GetProperty(std::string prop_name,
|
||||
std::string* prop) {
|
||||
if (prop_name == "rocksdb.iterator.super-version-number") {
|
||||
// First try to pass the value returned from inner iterator.
|
||||
if (!db_iter_->GetProperty(prop_name, prop).ok()) {
|
||||
*prop = std::to_string(sv_number_);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
return db_iter_->GetProperty(prop_name, prop);
|
||||
}
|
||||
|
||||
void ArenaWrappedDBIter::Init(
|
||||
Env* env, const ReadOptions& read_options, const ImmutableOptions& ioptions,
|
||||
const MutableCFOptions& mutable_cf_options, const Version* version,
|
||||
const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iteration,
|
||||
uint64_t version_number, ReadCallback* read_callback, DBImpl* db_impl,
|
||||
ColumnFamilyData* cfd, bool expose_blob_index, bool allow_refresh) {
|
||||
auto mem = arena_.AllocateAligned(sizeof(DBIter));
|
||||
db_iter_ =
|
||||
new (mem) DBIter(env, read_options, ioptions, mutable_cf_options,
|
||||
ioptions.user_comparator, /* iter */ nullptr, version,
|
||||
sequence, true, max_sequential_skip_in_iteration,
|
||||
read_callback, db_impl, cfd, expose_blob_index);
|
||||
sv_number_ = version_number;
|
||||
read_options_ = read_options;
|
||||
allow_refresh_ = allow_refresh;
|
||||
}
|
||||
|
||||
Status ArenaWrappedDBIter::Refresh() {
|
||||
if (cfd_ == nullptr || db_impl_ == nullptr || !allow_refresh_) {
|
||||
return Status::NotSupported("Creating renew iterator is not allowed.");
|
||||
}
|
||||
assert(db_iter_ != nullptr);
|
||||
// TODO(yiwu): For last_seq_same_as_publish_seq_==false, this is not the
|
||||
// correct behavior. Will be corrected automatically when we take a snapshot
|
||||
// here for the case of WritePreparedTxnDB.
|
||||
uint64_t cur_sv_number = cfd_->GetSuperVersionNumber();
|
||||
TEST_SYNC_POINT("ArenaWrappedDBIter::Refresh:1");
|
||||
TEST_SYNC_POINT("ArenaWrappedDBIter::Refresh:2");
|
||||
while (true) {
|
||||
if (sv_number_ != cur_sv_number) {
|
||||
Env* env = db_iter_->env();
|
||||
db_iter_->~DBIter();
|
||||
arena_.~Arena();
|
||||
new (&arena_) Arena();
|
||||
|
||||
SuperVersion* sv = cfd_->GetReferencedSuperVersion(db_impl_);
|
||||
SequenceNumber latest_seq = db_impl_->GetLatestSequenceNumber();
|
||||
if (read_callback_) {
|
||||
read_callback_->Refresh(latest_seq);
|
||||
}
|
||||
Init(env, read_options_, *(cfd_->ioptions()), sv->mutable_cf_options,
|
||||
sv->current, latest_seq,
|
||||
sv->mutable_cf_options.max_sequential_skip_in_iterations,
|
||||
cur_sv_number, read_callback_, db_impl_, cfd_, expose_blob_index_,
|
||||
allow_refresh_);
|
||||
|
||||
InternalIterator* internal_iter = db_impl_->NewInternalIterator(
|
||||
read_options_, cfd_, sv, &arena_, db_iter_->GetRangeDelAggregator(),
|
||||
latest_seq, /* allow_unprepared_value */ true);
|
||||
SetIterUnderDBIter(internal_iter);
|
||||
break;
|
||||
} else {
|
||||
SequenceNumber latest_seq = db_impl_->GetLatestSequenceNumber();
|
||||
// Refresh range-tombstones in MemTable
|
||||
if (!read_options_.ignore_range_deletions) {
|
||||
SuperVersion* sv = cfd_->GetThreadLocalSuperVersion(db_impl_);
|
||||
ReadRangeDelAggregator* range_del_agg =
|
||||
db_iter_->GetRangeDelAggregator();
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter;
|
||||
range_del_iter.reset(
|
||||
sv->mem->NewRangeTombstoneIterator(read_options_, latest_seq));
|
||||
range_del_agg->AddTombstones(std::move(range_del_iter));
|
||||
cfd_->ReturnThreadLocalSuperVersion(sv);
|
||||
}
|
||||
// Refresh latest sequence number
|
||||
db_iter_->set_sequence(latest_seq);
|
||||
db_iter_->set_valid(false);
|
||||
// Check again if the latest super version number is changed
|
||||
uint64_t latest_sv_number = cfd_->GetSuperVersionNumber();
|
||||
if (latest_sv_number != cur_sv_number) {
|
||||
// If the super version number is changed after refreshing,
|
||||
// fallback to Re-Init the InternalIterator
|
||||
cur_sv_number = latest_sv_number;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
ArenaWrappedDBIter* NewArenaWrappedDbIterator(
|
||||
Env* env, const ReadOptions& read_options, const ImmutableOptions& ioptions,
|
||||
const MutableCFOptions& mutable_cf_options, const Version* version,
|
||||
const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations,
|
||||
uint64_t version_number, ReadCallback* read_callback, DBImpl* db_impl,
|
||||
ColumnFamilyData* cfd, bool expose_blob_index, bool allow_refresh) {
|
||||
ArenaWrappedDBIter* iter = new ArenaWrappedDBIter();
|
||||
iter->Init(env, read_options, ioptions, mutable_cf_options, version, sequence,
|
||||
max_sequential_skip_in_iterations, version_number, read_callback,
|
||||
db_impl, cfd, expose_blob_index, allow_refresh);
|
||||
if (db_impl != nullptr && cfd != nullptr && allow_refresh) {
|
||||
iter->StoreRefreshInfo(db_impl, cfd, read_callback, expose_blob_index);
|
||||
}
|
||||
|
||||
return iter;
|
||||
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,119 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
#include "db/db_impl/db_impl.h"
|
||||
#include "db/db_iter.h"
|
||||
#include "db/range_del_aggregator.h"
|
||||
#include "memory/arena.h"
|
||||
#include "options/cf_options.h"
|
||||
#include "rocksdb/db.h"
|
||||
#include "rocksdb/iterator.h"
|
||||
#include "util/autovector.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class Arena;
|
||||
class Version;
|
||||
|
||||
// A wrapper iterator which wraps DB Iterator and the arena, with which the DB
|
||||
// iterator is supposed to be allocated. This class is used as an entry point of
|
||||
// a iterator hierarchy whose memory can be allocated inline. In that way,
|
||||
// accessing the iterator tree can be more cache friendly. It is also faster
|
||||
// to allocate.
|
||||
// When using the class's Iterator interface, the behavior is exactly
|
||||
// the same as the inner DBIter.
|
||||
class ArenaWrappedDBIter : public Iterator {
|
||||
public:
|
||||
~ArenaWrappedDBIter() override {
|
||||
if (db_iter_ != nullptr) {
|
||||
db_iter_->~DBIter();
|
||||
} else {
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Get the arena to be used to allocate memory for DBIter to be wrapped,
|
||||
// as well as child iterators in it.
|
||||
virtual Arena* GetArena() { return &arena_; }
|
||||
virtual ReadRangeDelAggregator* GetRangeDelAggregator() {
|
||||
return db_iter_->GetRangeDelAggregator();
|
||||
}
|
||||
const ReadOptions& GetReadOptions() { return read_options_; }
|
||||
|
||||
// Set the internal iterator wrapped inside the DB Iterator. Usually it is
|
||||
// a merging iterator.
|
||||
virtual void SetIterUnderDBIter(InternalIterator* iter) {
|
||||
db_iter_->SetIter(iter);
|
||||
}
|
||||
|
||||
bool Valid() const override { return db_iter_->Valid(); }
|
||||
void SeekToFirst() override { db_iter_->SeekToFirst(); }
|
||||
void SeekToLast() override { db_iter_->SeekToLast(); }
|
||||
// 'target' does not contain timestamp, even if user timestamp feature is
|
||||
// enabled.
|
||||
void Seek(const Slice& target) override { db_iter_->Seek(target); }
|
||||
void SeekForPrev(const Slice& target) override {
|
||||
db_iter_->SeekForPrev(target);
|
||||
}
|
||||
void Next() override { db_iter_->Next(); }
|
||||
void Prev() override { db_iter_->Prev(); }
|
||||
Slice key() const override { return db_iter_->key(); }
|
||||
Slice value() const override { return db_iter_->value(); }
|
||||
Status status() const override { return db_iter_->status(); }
|
||||
Slice timestamp() const override { return db_iter_->timestamp(); }
|
||||
bool IsBlob() const { return db_iter_->IsBlob(); }
|
||||
|
||||
Status GetProperty(std::string prop_name, std::string* prop) override;
|
||||
|
||||
Status Refresh() override;
|
||||
|
||||
void Init(Env* env, const ReadOptions& read_options,
|
||||
const ImmutableOptions& ioptions,
|
||||
const MutableCFOptions& mutable_cf_options, const Version* version,
|
||||
const SequenceNumber& sequence,
|
||||
uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
|
||||
ReadCallback* read_callback, DBImpl* db_impl, ColumnFamilyData* cfd,
|
||||
bool expose_blob_index, bool allow_refresh);
|
||||
|
||||
// Store some parameters so we can refresh the iterator at a later point
|
||||
// with these same params
|
||||
void StoreRefreshInfo(DBImpl* db_impl, ColumnFamilyData* cfd,
|
||||
ReadCallback* read_callback, bool expose_blob_index) {
|
||||
db_impl_ = db_impl;
|
||||
cfd_ = cfd;
|
||||
read_callback_ = read_callback;
|
||||
expose_blob_index_ = expose_blob_index;
|
||||
}
|
||||
|
||||
private:
|
||||
DBIter* db_iter_ = nullptr;
|
||||
Arena arena_;
|
||||
uint64_t sv_number_;
|
||||
ColumnFamilyData* cfd_ = nullptr;
|
||||
DBImpl* db_impl_ = nullptr;
|
||||
ReadOptions read_options_;
|
||||
ReadCallback* read_callback_;
|
||||
bool expose_blob_index_ = false;
|
||||
bool allow_refresh_ = true;
|
||||
};
|
||||
|
||||
// Generate the arena wrapped iterator class.
|
||||
// `db_impl` and `cfd` are used for reneweal. If left null, renewal will not
|
||||
// be supported.
|
||||
extern ArenaWrappedDBIter* NewArenaWrappedDbIterator(
|
||||
Env* env, const ReadOptions& read_options, const ImmutableOptions& ioptions,
|
||||
const MutableCFOptions& mutable_cf_options, const Version* version,
|
||||
const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations,
|
||||
uint64_t version_number, ReadCallback* read_callback,
|
||||
DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr,
|
||||
bool expose_blob_index = false, bool allow_refresh = true);
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,16 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "rocksdb/rocksdb_namespace.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
constexpr uint64_t kInvalidBlobFileNumber = 0;
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,146 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cassert>
|
||||
|
||||
#include "db/blob/blob_garbage_meter.h"
|
||||
#include "rocksdb/rocksdb_namespace.h"
|
||||
#include "rocksdb/status.h"
|
||||
#include "table/internal_iterator.h"
|
||||
#include "test_util/sync_point.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
// An internal iterator that passes each key-value encountered to
|
||||
// BlobGarbageMeter as inflow in order to measure the total number and size of
|
||||
// blobs in the compaction input on a per-blob file basis.
|
||||
class BlobCountingIterator : public InternalIterator {
|
||||
public:
|
||||
BlobCountingIterator(InternalIterator* iter,
|
||||
BlobGarbageMeter* blob_garbage_meter)
|
||||
: iter_(iter), blob_garbage_meter_(blob_garbage_meter) {
|
||||
assert(iter_);
|
||||
assert(blob_garbage_meter_);
|
||||
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
bool Valid() const override { return iter_->Valid() && status_.ok(); }
|
||||
|
||||
void SeekToFirst() override {
|
||||
iter_->SeekToFirst();
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
void SeekToLast() override {
|
||||
iter_->SeekToLast();
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
void Seek(const Slice& target) override {
|
||||
iter_->Seek(target);
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
void SeekForPrev(const Slice& target) override {
|
||||
iter_->SeekForPrev(target);
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
void Next() override {
|
||||
assert(Valid());
|
||||
|
||||
iter_->Next();
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
bool NextAndGetResult(IterateResult* result) override {
|
||||
assert(Valid());
|
||||
|
||||
const bool res = iter_->NextAndGetResult(result);
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
return res;
|
||||
}
|
||||
|
||||
void Prev() override {
|
||||
assert(Valid());
|
||||
|
||||
iter_->Prev();
|
||||
UpdateAndCountBlobIfNeeded();
|
||||
}
|
||||
|
||||
Slice key() const override {
|
||||
assert(Valid());
|
||||
return iter_->key();
|
||||
}
|
||||
|
||||
Slice user_key() const override {
|
||||
assert(Valid());
|
||||
return iter_->user_key();
|
||||
}
|
||||
|
||||
Slice value() const override {
|
||||
assert(Valid());
|
||||
return iter_->value();
|
||||
}
|
||||
|
||||
Status status() const override { return status_; }
|
||||
|
||||
bool PrepareValue() override {
|
||||
assert(Valid());
|
||||
return iter_->PrepareValue();
|
||||
}
|
||||
|
||||
bool MayBeOutOfLowerBound() override {
|
||||
assert(Valid());
|
||||
return iter_->MayBeOutOfLowerBound();
|
||||
}
|
||||
|
||||
IterBoundCheck UpperBoundCheckResult() override {
|
||||
assert(Valid());
|
||||
return iter_->UpperBoundCheckResult();
|
||||
}
|
||||
|
||||
void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
|
||||
iter_->SetPinnedItersMgr(pinned_iters_mgr);
|
||||
}
|
||||
|
||||
bool IsKeyPinned() const override {
|
||||
assert(Valid());
|
||||
return iter_->IsKeyPinned();
|
||||
}
|
||||
|
||||
bool IsValuePinned() const override {
|
||||
assert(Valid());
|
||||
return iter_->IsValuePinned();
|
||||
}
|
||||
|
||||
Status GetProperty(std::string prop_name, std::string* prop) override {
|
||||
return iter_->GetProperty(prop_name, prop);
|
||||
}
|
||||
|
||||
private:
|
||||
void UpdateAndCountBlobIfNeeded() {
|
||||
assert(!iter_->Valid() || iter_->status().ok());
|
||||
|
||||
if (!iter_->Valid()) {
|
||||
status_ = iter_->status();
|
||||
return;
|
||||
}
|
||||
|
||||
TEST_SYNC_POINT(
|
||||
"BlobCountingIterator::UpdateAndCountBlobIfNeeded:ProcessInFlow");
|
||||
|
||||
status_ = blob_garbage_meter_->ProcessInFlow(key(), value());
|
||||
}
|
||||
|
||||
InternalIterator* iter_;
|
||||
BlobGarbageMeter* blob_garbage_meter_;
|
||||
Status status_;
|
||||
};
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,326 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "db/blob/blob_counting_iterator.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "db/blob/blob_garbage_meter.h"
|
||||
#include "db/blob/blob_index.h"
|
||||
#include "db/blob/blob_log_format.h"
|
||||
#include "db/dbformat.h"
|
||||
#include "test_util/testharness.h"
|
||||
#include "test_util/testutil.h"
|
||||
#include "util/vector_iterator.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
void CheckInFlow(const BlobGarbageMeter& blob_garbage_meter,
|
||||
uint64_t blob_file_number, uint64_t count, uint64_t bytes) {
|
||||
const auto& flows = blob_garbage_meter.flows();
|
||||
|
||||
const auto it = flows.find(blob_file_number);
|
||||
if (it == flows.end()) {
|
||||
ASSERT_EQ(count, 0);
|
||||
ASSERT_EQ(bytes, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
const auto& in = it->second.GetInFlow();
|
||||
|
||||
ASSERT_EQ(in.GetCount(), count);
|
||||
ASSERT_EQ(in.GetBytes(), bytes);
|
||||
}
|
||||
|
||||
TEST(BlobCountingIteratorTest, CountBlobs) {
|
||||
// Note: the input consists of three key-values: two are blob references to
|
||||
// different blob files, while the third one is a plain value.
|
||||
constexpr char user_key0[] = "key0";
|
||||
constexpr char user_key1[] = "key1";
|
||||
constexpr char user_key2[] = "key2";
|
||||
|
||||
const std::vector<std::string> keys{
|
||||
test::KeyStr(user_key0, 1, kTypeBlobIndex),
|
||||
test::KeyStr(user_key1, 2, kTypeBlobIndex),
|
||||
test::KeyStr(user_key2, 3, kTypeValue)};
|
||||
|
||||
constexpr uint64_t first_blob_file_number = 4;
|
||||
constexpr uint64_t first_offset = 1000;
|
||||
constexpr uint64_t first_size = 2000;
|
||||
|
||||
std::string first_blob_index;
|
||||
BlobIndex::EncodeBlob(&first_blob_index, first_blob_file_number, first_offset,
|
||||
first_size, kNoCompression);
|
||||
|
||||
constexpr uint64_t second_blob_file_number = 6;
|
||||
constexpr uint64_t second_offset = 2000;
|
||||
constexpr uint64_t second_size = 4000;
|
||||
|
||||
std::string second_blob_index;
|
||||
BlobIndex::EncodeBlob(&second_blob_index, second_blob_file_number,
|
||||
second_offset, second_size, kNoCompression);
|
||||
|
||||
const std::vector<std::string> values{first_blob_index, second_blob_index,
|
||||
"raw_value"};
|
||||
|
||||
assert(keys.size() == values.size());
|
||||
|
||||
VectorIterator input(keys, values);
|
||||
BlobGarbageMeter blob_garbage_meter;
|
||||
|
||||
BlobCountingIterator blob_counter(&input, &blob_garbage_meter);
|
||||
|
||||
constexpr uint64_t first_expected_bytes =
|
||||
first_size +
|
||||
BlobLogRecord::CalculateAdjustmentForRecordHeader(sizeof(user_key0) - 1);
|
||||
constexpr uint64_t second_expected_bytes =
|
||||
second_size +
|
||||
BlobLogRecord::CalculateAdjustmentForRecordHeader(sizeof(user_key1) - 1);
|
||||
|
||||
// Call SeekToFirst and iterate forward
|
||||
blob_counter.SeekToFirst();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[0]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key0);
|
||||
ASSERT_EQ(blob_counter.value(), values[0]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 1,
|
||||
first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 0, 0);
|
||||
|
||||
blob_counter.Next();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[1]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key1);
|
||||
ASSERT_EQ(blob_counter.value(), values[1]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 1,
|
||||
first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 1,
|
||||
second_expected_bytes);
|
||||
|
||||
blob_counter.Next();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[2]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key2);
|
||||
ASSERT_EQ(blob_counter.value(), values[2]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 1,
|
||||
first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 1,
|
||||
second_expected_bytes);
|
||||
|
||||
blob_counter.Next();
|
||||
ASSERT_FALSE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 1,
|
||||
first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 1,
|
||||
second_expected_bytes);
|
||||
|
||||
// Do it again using NextAndGetResult
|
||||
blob_counter.SeekToFirst();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[0]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key0);
|
||||
ASSERT_EQ(blob_counter.value(), values[0]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 1,
|
||||
second_expected_bytes);
|
||||
|
||||
{
|
||||
IterateResult result;
|
||||
ASSERT_TRUE(blob_counter.NextAndGetResult(&result));
|
||||
ASSERT_EQ(result.key, keys[1]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key1);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[1]);
|
||||
ASSERT_EQ(blob_counter.value(), values[1]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 2,
|
||||
2 * second_expected_bytes);
|
||||
}
|
||||
|
||||
{
|
||||
IterateResult result;
|
||||
ASSERT_TRUE(blob_counter.NextAndGetResult(&result));
|
||||
ASSERT_EQ(result.key, keys[2]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key2);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[2]);
|
||||
ASSERT_EQ(blob_counter.value(), values[2]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 2,
|
||||
2 * second_expected_bytes);
|
||||
}
|
||||
|
||||
{
|
||||
IterateResult result;
|
||||
ASSERT_FALSE(blob_counter.NextAndGetResult(&result));
|
||||
ASSERT_FALSE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 2,
|
||||
2 * second_expected_bytes);
|
||||
}
|
||||
|
||||
// Call SeekToLast and iterate backward
|
||||
blob_counter.SeekToLast();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[2]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key2);
|
||||
ASSERT_EQ(blob_counter.value(), values[2]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 2,
|
||||
2 * second_expected_bytes);
|
||||
|
||||
blob_counter.Prev();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[1]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key1);
|
||||
ASSERT_EQ(blob_counter.value(), values[1]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 2,
|
||||
2 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 3,
|
||||
3 * second_expected_bytes);
|
||||
|
||||
blob_counter.Prev();
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[0]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key0);
|
||||
ASSERT_EQ(blob_counter.value(), values[0]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 3,
|
||||
3 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 3,
|
||||
3 * second_expected_bytes);
|
||||
|
||||
blob_counter.Prev();
|
||||
ASSERT_FALSE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 3,
|
||||
3 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 3,
|
||||
3 * second_expected_bytes);
|
||||
|
||||
// Call Seek for all keys (plus one that's greater than all of them)
|
||||
blob_counter.Seek(keys[0]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[0]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key0);
|
||||
ASSERT_EQ(blob_counter.value(), values[0]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 4,
|
||||
4 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 3,
|
||||
3 * second_expected_bytes);
|
||||
|
||||
blob_counter.Seek(keys[1]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[1]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key1);
|
||||
ASSERT_EQ(blob_counter.value(), values[1]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 4,
|
||||
4 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 4,
|
||||
4 * second_expected_bytes);
|
||||
|
||||
blob_counter.Seek(keys[2]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[2]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key2);
|
||||
ASSERT_EQ(blob_counter.value(), values[2]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 4,
|
||||
4 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 4,
|
||||
4 * second_expected_bytes);
|
||||
|
||||
blob_counter.Seek("zzz");
|
||||
ASSERT_FALSE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 4,
|
||||
4 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 4,
|
||||
4 * second_expected_bytes);
|
||||
|
||||
// Call SeekForPrev for all keys (plus one that's less than all of them)
|
||||
blob_counter.SeekForPrev("aaa");
|
||||
ASSERT_FALSE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 4,
|
||||
4 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 4,
|
||||
4 * second_expected_bytes);
|
||||
|
||||
blob_counter.SeekForPrev(keys[0]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[0]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key0);
|
||||
ASSERT_EQ(blob_counter.value(), values[0]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 5,
|
||||
5 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 4,
|
||||
4 * second_expected_bytes);
|
||||
|
||||
blob_counter.SeekForPrev(keys[1]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[1]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key1);
|
||||
ASSERT_EQ(blob_counter.value(), values[1]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 5,
|
||||
5 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 5,
|
||||
5 * second_expected_bytes);
|
||||
|
||||
blob_counter.SeekForPrev(keys[2]);
|
||||
ASSERT_TRUE(blob_counter.Valid());
|
||||
ASSERT_OK(blob_counter.status());
|
||||
ASSERT_EQ(blob_counter.key(), keys[2]);
|
||||
ASSERT_EQ(blob_counter.user_key(), user_key2);
|
||||
ASSERT_EQ(blob_counter.value(), values[2]);
|
||||
CheckInFlow(blob_garbage_meter, first_blob_file_number, 5,
|
||||
5 * first_expected_bytes);
|
||||
CheckInFlow(blob_garbage_meter, second_blob_file_number, 5,
|
||||
5 * second_expected_bytes);
|
||||
}
|
||||
|
||||
// Feeding the iterator a value that cannot be decoded as a blob index
// must invalidate the iterator and surface a non-OK status.
TEST(BlobCountingIteratorTest, CorruptBlobIndex) {
  const std::vector<std::string> corrupt_keys{
      test::KeyStr("user_key", 1, kTypeBlobIndex)};
  const std::vector<std::string> corrupt_values{"i_am_not_a_blob_index"};
  assert(corrupt_keys.size() == corrupt_values.size());

  VectorIterator underlying(corrupt_keys, corrupt_values);
  BlobGarbageMeter meter;
  BlobCountingIterator iter_under_test(&underlying, &meter);

  // The very first positioning attempt hits the undecodable blob index.
  iter_under_test.SeekToFirst();
  ASSERT_FALSE(iter_under_test.Valid());
  ASSERT_NOK(iter_under_test.status());
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
@ -1,34 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "db/blob/blob_fetcher.h"
|
||||
|
||||
#include "db/version_set.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
// Fetches the blob referenced by a still-encoded blob index slice by
// delegating to the wrapped Version, using the read options captured at
// construction time. Output parameters are filled by Version::GetBlob.
Status BlobFetcher::FetchBlob(const Slice& user_key,
                              const Slice& blob_index_slice,
                              FilePrefetchBuffer* prefetch_buffer,
                              PinnableSlice* blob_value,
                              uint64_t* bytes_read) const {
  // The fetcher is only meaningful when wrapping a live Version.
  assert(version_);

  const Status s =
      version_->GetBlob(read_options_, user_key, blob_index_slice,
                        prefetch_buffer, blob_value, bytes_read);
  return s;
}
|
||||
|
||||
// Same as the slice-based overload, but takes an already-decoded
// BlobIndex; simply forwards to the wrapped Version with the captured
// read options. Output parameters are filled by Version::GetBlob.
Status BlobFetcher::FetchBlob(const Slice& user_key,
                              const BlobIndex& blob_index,
                              FilePrefetchBuffer* prefetch_buffer,
                              PinnableSlice* blob_value,
                              uint64_t* bytes_read) const {
  // The fetcher is only meaningful when wrapping a live Version.
  assert(version_);

  const Status s =
      version_->GetBlob(read_options_, user_key, blob_index,
                        prefetch_buffer, blob_value, bytes_read);
  return s;
}
|
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,37 +0,0 @@
|
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "rocksdb/options.h"
|
||||
#include "rocksdb/status.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
class Version;
|
||||
class Slice;
|
||||
class FilePrefetchBuffer;
|
||||
class PinnableSlice;
|
||||
class BlobIndex;
|
||||
|
||||
// A thin wrapper around the blob retrieval functionality of Version.
|
||||
class BlobFetcher {
|
||||
public:
|
||||
BlobFetcher(const Version* version, const ReadOptions& read_options)
|
||||
: version_(version), read_options_(read_options) {}
|
||||
|
||||
Status FetchBlob(const Slice& user_key, const Slice& blob_index_slice,
|
||||
FilePrefetchBuffer* prefetch_buffer,
|
||||
PinnableSlice* blob_value, uint64_t* bytes_read) const;
|
||||
|
||||
Status FetchBlob(const Slice& user_key, const BlobIndex& blob_index,
|
||||
FilePrefetchBuffer* prefetch_buffer,
|
||||
PinnableSlice* blob_value, uint64_t* bytes_read) const;
|
||||
|
||||
private:
|
||||
const Version* version_;
|
||||
ReadOptions read_options_;
|
||||
};
|
||||
} // namespace ROCKSDB_NAMESPACE
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user