Compare commits

20 Commits

9e3088a66b
f6d4b8a5a0
eef6001ea2
0915c99f01
4f48a2af7b
aaabdc85d9
5929273ea6
b0864639e8
59d47e8c3e
b88bb93514
944346d0fa
02fd7ca92a
0eab26e482
25dce8fd7c
10c141a3b7
2c62c227ae
0bc8750e82
f6b3de76e5
974dfc3de6
cad5db1c3e
.travis.yml

@@ -81,7 +81,9 @@ install:
       sudo apt-get install -y mingw-w64 ;
     fi
   - if [[ "${JOB_NAME}" == cmake* ]] && [ "${TRAVIS_OS_NAME}" == linux ]; then
-      mkdir cmake-dist && curl -sfSL https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-Linux-x86_64.tar.gz | tar --strip-components=1 -C cmake-dist -xz && export PATH=$PWD/cmake-dist/bin:$PATH;
+      CMAKE_DIST_URL="https://rocksdb-deps.s3-us-west-2.amazonaws.com/cmake/cmake-3.14.5-Linux-$(uname -m).tar.bz2";
+      TAR_OPT="--strip-components=1 -xj";
+      mkdir cmake-dist && curl --silent --fail --show-error --location "${CMAKE_DIST_URL}" | tar -C cmake-dist ${TAR_OPT} && export PATH=$PWD/cmake-dist/bin:$PATH;
     fi
   - if [[ "${JOB_NAME}" == java_test ]]; then
       java -version && echo "JAVA_HOME=${JAVA_HOME}";
CMakeLists.txt

@@ -1166,9 +1166,15 @@ if(WITH_BENCHMARK_TOOLS)
     ${ROCKSDB_LIB})
 endif()

+option(WITH_CORE_TOOLS "build with ldb and sst_dump" ON)
 option(WITH_TOOLS "build with tools" ON)
-if(WITH_TOOLS)
+if(WITH_CORE_TOOLS OR WITH_TOOLS)
   add_subdirectory(tools)
+  add_custom_target(core_tools
+    DEPENDS ${core_tool_deps})
+endif()
+
+if(WITH_TOOLS)
   add_subdirectory(db_stress_tool)
   add_custom_target(tools
     DEPENDS ${tool_deps})
HISTORY.md (18 changes)

@@ -1,5 +1,21 @@
 # Rocksdb Change Log
+## Unreleased
+### Bug Fixes
+* Upgraded version of bzip library (1.0.6 -> 1.0.8) used with RocksJava to address potential vulnerabilities if an attacker can manipulate compressed data saved and loaded by RocksDB (not normal). See issue #6703.
+
+## 6.7.3 (03/18/2020)
+### Bug Fixes
+* Fix a data race that might cause crash when calling DB::GetCreationTimeOfOldestFile() by a small chance. The bug was introduced in 6.6 Release.
+
+## 6.7.2 (02/24/2020)
+### Bug Fixes
+* Fixed a bug of IO Uring partial result handling introduced in 6.7.0.
+
+
+## 6.7.1 (02/13/2020)
+### Bug Fixes
+* Fixed issue #6316 that can cause a corruption of the MANIFEST file in the middle when writing to it fails due to no disk space.
+* Batched MultiGet() ignores IO errors while reading data blocks, causing it to potentially continue looking for a key and returning stale results.
+
 ## 6.7.0 (01/21/2020)
 ### Public API Change

@@ -14,7 +30,6 @@
 * Fix a race condition for cfd->log_number_ between manifest switch and memtable switch (PR 6249) when number of column families is greater than 1.
 * Fix a bug on fractional cascading index when multiple files at the same level contain the same smallest user key, and those user keys are for merge operands. In this case, Get() the exact key may miss some merge operands.
 * Delcare kHashSearch index type feature-incompatible with index_block_restart_interval larger than 1.
-* Fix incorrect results while block-based table uses kHashSearch, together with Prev()/SeekForPrev().
 * Fixed an issue where the thread pools were not resized upon setting `max_background_jobs` dynamically through the `SetDBOptions` interface.
 * Fix a bug that can cause write threads to hang when a slowdown/stall happens and there is a mix of writers with WriteOptions::no_slowdown set/unset.
 * Fixed an issue where an incorrect "number of input records" value was used to compute the "records dropped" statistics for compactions.

@@ -23,6 +38,7 @@
 * It is now possible to enable periodic compactions for the base DB when using BlobDB.
 * BlobDB now garbage collects non-TTL blobs when `enable_garbage_collection` is set to `true` in `BlobDBOptions`. Garbage collection is performed during compaction: any valid blobs located in the oldest N files (where N is the number of non-TTL blob files multiplied by the value of `BlobDBOptions::garbage_collection_cutoff`) encountered during compaction get relocated to new blob files, and old blob files are dropped once they are no longer needed. Note: we recommend enabling periodic compactions for the base DB when using this feature to deal with the case when some old blob files are kept alive by SSTs that otherwise do not get picked for compaction.
 * `db_bench` now supports the `garbage_collection_cutoff` option for BlobDB.
+* MultiGet() can use IO Uring to parallelize read from the same SST file. This featuer is by default disabled. It can be enabled with environment variable ROCKSDB_USE_IO_URING.

 ## 6.6.2 (01/13/2020)
 ### Bug Fixes
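Several of the bullets above concern the batched MultiGet() API. For reference, a minimal usage sketch follows; it is hedged, not code from this PR, assumes an open rocksdb::DB* named db, and uses the same overload the tests later in this diff call:

#include <rocksdb/db.h>
#include <vector>

void BatchedLookup(rocksdb::DB* db) {
  // The key strings must outlive the Slices that point at them.
  std::vector<std::string> key_data{"k1", "k2"};
  std::vector<rocksdb::Slice> keys(key_data.begin(), key_data.end());
  std::vector<rocksdb::PinnableSlice> values(keys.size());
  std::vector<rocksdb::Status> statuses(keys.size());
  db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), keys.size(),
               keys.data(), values.data(), statuses.data(),
               /*sorted_input=*/true);
  for (size_t i = 0; i < keys.size(); ++i) {
    // After the 6.7.1 fix noted above, an IO error surfaces in statuses[i]
    // instead of a stale value landing in values[i].
    if (!statuses[i].ok()) { /* handle NotFound or IO error */ }
  }
}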
Makefile (47 changes)

@@ -1128,16 +1128,21 @@ unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) $(TOOLLIBOBJECTS) unit
 rocksdb.h rocksdb.cc: build_tools/amalgamate.py Makefile $(LIB_SOURCES) unity.cc
 	build_tools/amalgamate.py -I. -i./include unity.cc -x include/rocksdb/c.h -H rocksdb.h -o rocksdb.cc

-clean: clean-ext-libraries-all clean-rocks
+clean: clean-ext-libraries-all clean-rocks clean-rocksjava

-clean-not-downloaded: clean-ext-libraries-bin clean-rocks
+clean-not-downloaded: clean-ext-libraries-bin clean-rocks clean-not-downloaded-rocksjava

 clean-rocks:
 	rm -f $(BENCHMARKS) $(TOOLS) $(TESTS) $(PARALLEL_TEST) $(LIBRARY) $(SHARED)
 	rm -rf $(CLEAN_FILES) ios-x86 ios-arm scan_build_report
 	$(FIND) . -name "*.[oda]" -exec rm -f {} \;
 	$(FIND) . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
-	cd java; $(MAKE) clean
+
+clean-rocksjava:
+	cd java && $(MAKE) clean
+
+clean-not-downloaded-rocksjava:
+	cd java && $(MAKE) clean-not-downloaded

 clean-ext-libraries-all:
 	rm -rf bzip2* snappy* zlib* lz4* zstd*
@@ -1784,11 +1789,11 @@ SHA256_CMD = sha256sum
 ZLIB_VER ?= 1.2.11
 ZLIB_SHA256 ?= c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1
 ZLIB_DOWNLOAD_BASE ?= http://zlib.net
-BZIP2_VER ?= 1.0.6
-BZIP2_SHA256 ?= a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
-BZIP2_DOWNLOAD_BASE ?= https://downloads.sourceforge.net/project/bzip2
-SNAPPY_VER ?= 1.1.7
-SNAPPY_SHA256 ?= 3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4
+BZIP2_VER ?= 1.0.8
+BZIP2_SHA256 ?= ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269
+BZIP2_DOWNLOAD_BASE ?= https://sourceware.org/pub/bzip2
+SNAPPY_VER ?= 1.1.8
+SNAPPY_SHA256 ?= 16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f
 SNAPPY_DOWNLOAD_BASE ?= https://github.com/google/snappy/archive
 LZ4_VER ?= 1.9.2
 LZ4_SHA256 ?= 658ba6191fa44c92280d4aa2c271b0f4fbc0e34d249578dd05e50e76d0e5efcc
@@ -1834,7 +1839,7 @@ endif
 libz.a:
 	-rm -rf zlib-$(ZLIB_VER)
 ifeq (,$(wildcard ./zlib-$(ZLIB_VER).tar.gz))
-	curl --output zlib-$(ZLIB_VER).tar.gz -L ${ZLIB_DOWNLOAD_BASE}/zlib-$(ZLIB_VER).tar.gz
+	curl --fail --output zlib-$(ZLIB_VER).tar.gz --location ${ZLIB_DOWNLOAD_BASE}/zlib-$(ZLIB_VER).tar.gz
 endif
 	ZLIB_SHA256_ACTUAL=`$(SHA256_CMD) zlib-$(ZLIB_VER).tar.gz | cut -d ' ' -f 1`; \
 	if [ "$(ZLIB_SHA256)" != "$$ZLIB_SHA256_ACTUAL" ]; then \
@@ -1848,7 +1853,7 @@ endif
 libbz2.a:
 	-rm -rf bzip2-$(BZIP2_VER)
 ifeq (,$(wildcard ./bzip2-$(BZIP2_VER).tar.gz))
-	curl --output bzip2-$(BZIP2_VER).tar.gz -L ${CURL_SSL_OPTS} ${BZIP2_DOWNLOAD_BASE}/bzip2-$(BZIP2_VER).tar.gz
+	curl --fail --output bzip2-$(BZIP2_VER).tar.gz --location ${CURL_SSL_OPTS} ${BZIP2_DOWNLOAD_BASE}/bzip2-$(BZIP2_VER).tar.gz
 endif
 	BZIP2_SHA256_ACTUAL=`$(SHA256_CMD) bzip2-$(BZIP2_VER).tar.gz | cut -d ' ' -f 1`; \
 	if [ "$(BZIP2_SHA256)" != "$$BZIP2_SHA256_ACTUAL" ]; then \
@@ -1862,7 +1867,7 @@ endif
 libsnappy.a:
 	-rm -rf snappy-$(SNAPPY_VER)
 ifeq (,$(wildcard ./snappy-$(SNAPPY_VER).tar.gz))
-	curl --output snappy-$(SNAPPY_VER).tar.gz -L ${CURL_SSL_OPTS} ${SNAPPY_DOWNLOAD_BASE}/$(SNAPPY_VER).tar.gz
+	curl --fail --output snappy-$(SNAPPY_VER).tar.gz --location ${CURL_SSL_OPTS} ${SNAPPY_DOWNLOAD_BASE}/$(SNAPPY_VER).tar.gz
 endif
 	SNAPPY_SHA256_ACTUAL=`$(SHA256_CMD) snappy-$(SNAPPY_VER).tar.gz | cut -d ' ' -f 1`; \
 	if [ "$(SNAPPY_SHA256)" != "$$SNAPPY_SHA256_ACTUAL" ]; then \
@@ -1877,7 +1882,7 @@ endif
 liblz4.a:
 	-rm -rf lz4-$(LZ4_VER)
 ifeq (,$(wildcard ./lz4-$(LZ4_VER).tar.gz))
-	curl --output lz4-$(LZ4_VER).tar.gz -L ${CURL_SSL_OPTS} ${LZ4_DOWNLOAD_BASE}/v$(LZ4_VER).tar.gz
+	curl --fail --output lz4-$(LZ4_VER).tar.gz --location ${CURL_SSL_OPTS} ${LZ4_DOWNLOAD_BASE}/v$(LZ4_VER).tar.gz
 endif
 	LZ4_SHA256_ACTUAL=`$(SHA256_CMD) lz4-$(LZ4_VER).tar.gz | cut -d ' ' -f 1`; \
 	if [ "$(LZ4_SHA256)" != "$$LZ4_SHA256_ACTUAL" ]; then \
@@ -1891,7 +1896,7 @@ endif
 libzstd.a:
 	-rm -rf zstd-$(ZSTD_VER)
 ifeq (,$(wildcard ./zstd-$(ZSTD_VER).tar.gz))
-	curl --output zstd-$(ZSTD_VER).tar.gz -L ${CURL_SSL_OPTS} ${ZSTD_DOWNLOAD_BASE}/v$(ZSTD_VER).tar.gz
+	curl --fail --output zstd-$(ZSTD_VER).tar.gz --location ${CURL_SSL_OPTS} ${ZSTD_DOWNLOAD_BASE}/v$(ZSTD_VER).tar.gz
 endif
 	ZSTD_SHA256_ACTUAL=`$(SHA256_CMD) zstd-$(ZSTD_VER).tar.gz | cut -d ' ' -f 1`; \
 	if [ "$(ZSTD_SHA256)" != "$$ZSTD_SHA256_ACTUAL" ]; then \
@@ -1961,35 +1966,35 @@ rocksdbjavastaticreleasedocker: rocksdbjavastatic rocksdbjavastaticdockerx86 roc

 rocksdbjavastaticdockerx86:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_x86-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos6_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_x86-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos6_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerx86_64:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_x64-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos6_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_x64-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos6_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerppc64le:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_ppc64le-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos7_ppc64le-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_ppc64le-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos7_ppc64le-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerarm64v8:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_arm64v8-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos7_arm64v8-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_arm64v8-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos7_arm64v8-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerx86musl:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_x86-musl-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_x86-musl-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerx86_64musl:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_x64-musl-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_x64-musl-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerppc64lemusl:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_ppc64le-musl-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_ppc64le-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_ppc64le-musl-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_ppc64le-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticdockerarm64v8musl:
 	mkdir -p java/target
-	docker run --rm --name rocksdb_linux_arm64v8-musl-be --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_arm64v8-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh
+	docker run --rm --name rocksdb_linux_arm64v8-musl-be --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:alpine3_arm64v8-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh

 rocksdbjavastaticpublish: rocksdbjavastaticrelease rocksdbjavastaticpublishcentral
TARGETS (1 change)

@@ -49,6 +49,7 @@ ROCKSDB_OS_PREPROCESSOR_FLAGS = [
         "-DROCKSDB_SCHED_GETCPU_PRESENT",
+        "-DROCKSDB_IOURING_PRESENT",
         "-DHAVE_SSE42",
         "-DLIBURING",
         "-DNUMA",
     ],
 ),
appveyor.yml

@@ -29,7 +29,7 @@ install:
 - md %THIRDPARTY_HOME%
 - echo "Building Snappy dependency..."
 - cd %THIRDPARTY_HOME%
-- curl -fsSL -o snappy-1.1.7.zip https://github.com/google/snappy/archive/1.1.7.zip
+- curl --fail --silent --show-error --output snappy-1.1.7.zip --location https://github.com/google/snappy/archive/1.1.7.zip
 - unzip snappy-1.1.7.zip
 - cd snappy-1.1.7
 - mkdir build
@@ -39,7 +39,7 @@ install:
 - msbuild Snappy.sln /p:Configuration=Release /p:Platform=x64
 - echo "Building LZ4 dependency..."
 - cd %THIRDPARTY_HOME%
-- curl -fsSL -o lz4-1.8.3.zip https://github.com/lz4/lz4/archive/v1.8.3.zip
+- curl --fail --silent --show-error --output lz4-1.8.3.zip --location https://github.com/lz4/lz4/archive/v1.8.3.zip
 - unzip lz4-1.8.3.zip
 - cd lz4-1.8.3\visual\VS2010
 - ps: $CMD="$Env:DEV_ENV"; & $CMD lz4.sln /upgrade
@@ -47,7 +47,7 @@ install:
 - msbuild lz4.sln /p:Configuration=Release /p:Platform=x64
 - echo "Building ZStd dependency..."
 - cd %THIRDPARTY_HOME%
-- curl -fsSL -o zstd-1.4.0.zip https://github.com/facebook/zstd/archive/v1.4.0.zip
+- curl --fail --silent --show-error --output zstd-1.4.0.zip --location https://github.com/facebook/zstd/archive/v1.4.0.zip
 - unzip zstd-1.4.0.zip
 - cd zstd-1.4.0\build\VS2010
 - ps: $CMD="$Env:DEV_ENV"; & $CMD zstd.sln /upgrade
buckifier/targets_cfg.py

@@ -55,6 +55,7 @@ ROCKSDB_OS_PREPROCESSOR_FLAGS = [
         "-DROCKSDB_SCHED_GETCPU_PRESENT",
+        "-DROCKSDB_IOURING_PRESENT",
         "-DHAVE_SSE42",
         "-DLIBURING",
         "-DNUMA",
     ],
 ),
build_tools/build_detect_platform

@@ -150,18 +150,20 @@ case "$TARGET_OS" in
             PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -latomic"
         fi
         PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
-        # check for liburing
-        $CXX $CFLAGS -x c++ - -luring -o /dev/null 2>/dev/null <<EOF
-          #include <liburing.h>
-          int main() {
-            struct io_uring ring;
-            io_uring_queue_init(1, &ring, 0);
-            return 0;
-          }
+        if test $ROCKSDB_USE_IO_URING; then
+          # check for liburing
+          $CXX $CFLAGS -x c++ - -luring -o /dev/null 2>/dev/null <<EOF
+            #include <liburing.h>
+            int main() {
+              struct io_uring ring;
+              io_uring_queue_init(1, &ring, 0);
+              return 0;
+            }
 EOF
-        if [ "$?" = 0 ]; then
-            PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -luring"
-            COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_IOURING_PRESENT"
+          if [ "$?" = 0 ]; then
+              PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -luring"
+              COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_IOURING_PRESENT"
+          fi
         fi
         if test -z "$USE_FOLLY_DISTRIBUTED_MUTEX"; then
           USE_FOLLY_DISTRIBUTED_MUTEX=1
build_tools/regression_build_test.sh

@@ -378,7 +378,7 @@ function send_to_ods {
     echo >&2 "ERROR: Key $key doesn't have a value."
     return
   fi
-  curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
+  curl --silent "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
     --connect-timeout 60
 }

@@ -880,7 +880,7 @@ run_regression()

 # parameters: $1 -- key, $2 -- value
 function send_size_to_ods {
-  curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \
+  curl --silent "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \
     --connect-timeout 60
 }
db/db_basic_test.cc

@@ -1405,6 +1405,91 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
   }
 }

+TEST_F(DBBasicTest, MultiGetBatchedMultiLevelMerge) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.merge_operator = MergeOperators::CreateStringAppendOperator();
+  BlockBasedTableOptions bbto;
+  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  Reopen(options);
+  int num_keys = 0;
+
+  for (int i = 0; i < 128; ++i) {
+    ASSERT_OK(Put("key_" + std::to_string(i), "val_l2_" + std::to_string(i)));
+    num_keys++;
+    if (num_keys == 8) {
+      Flush();
+      num_keys = 0;
+    }
+  }
+  if (num_keys > 0) {
+    Flush();
+    num_keys = 0;
+  }
+  MoveFilesToLevel(2);
+
+  for (int i = 0; i < 128; i += 3) {
+    ASSERT_OK(Merge("key_" + std::to_string(i), "val_l1_" + std::to_string(i)));
+    num_keys++;
+    if (num_keys == 8) {
+      Flush();
+      num_keys = 0;
+    }
+  }
+  if (num_keys > 0) {
+    Flush();
+    num_keys = 0;
+  }
+  MoveFilesToLevel(1);
+
+  for (int i = 0; i < 128; i += 5) {
+    ASSERT_OK(Merge("key_" + std::to_string(i), "val_l0_" + std::to_string(i)));
+    num_keys++;
+    if (num_keys == 8) {
+      Flush();
+      num_keys = 0;
+    }
+  }
+  if (num_keys > 0) {
+    Flush();
+    num_keys = 0;
+  }
+  ASSERT_EQ(0, num_keys);
+
+  for (int i = 0; i < 128; i += 9) {
+    ASSERT_OK(Merge("key_" + std::to_string(i), "val_mem_" + std::to_string(i)));
+  }
+
+  std::vector<std::string> keys;
+  std::vector<std::string> values;
+
+  for (int i = 32; i < 80; ++i) {
+    keys.push_back("key_" + std::to_string(i));
+  }
+
+  values = MultiGet(keys, nullptr);
+  ASSERT_EQ(values.size(), keys.size());
+  for (unsigned int j = 0; j < 48; ++j) {
+    int key = j + 32;
+    std::string value;
+    value.append("val_l2_" + std::to_string(key));
+    if (key % 3 == 0) {
+      value.append(",");
+      value.append("val_l1_" + std::to_string(key));
+    }
+    if (key % 5 == 0) {
+      value.append(",");
+      value.append("val_l0_" + std::to_string(key));
+    }
+    if (key % 9 == 0) {
+      value.append(",");
+      value.append("val_mem_" + std::to_string(key));
+    }
+    ASSERT_EQ(values[j], value);
+  }
+}
+
 // Test class for batched MultiGet with prefix extractor
 // Param bool - If true, use partitioned filters
 // If false, use full filter block
@@ -2011,6 +2096,90 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   }
 }

+TEST_P(DBBasicTestWithParallelIO, MultiGetWithChecksumMismatch) {
+  std::vector<std::string> key_data(10);
+  std::vector<Slice> keys;
+  // We cannot resize a PinnableSlice vector, so just set initial size to
+  // largest we think we will need
+  std::vector<PinnableSlice> values(10);
+  std::vector<Status> statuses;
+  int read_count = 0;
+  ReadOptions ro;
+  ro.fill_cache = fill_cache();
+
+  SyncPoint::GetInstance()->SetCallBack(
+      "RetrieveMultipleBlocks:VerifyChecksum", [&](void *status) {
+        Status* s = static_cast<Status*>(status);
+        read_count++;
+        if (read_count == 2) {
+          *s = Status::Corruption();
+        }
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
+  // Warm up the cache first
+  key_data.emplace_back(Key(0));
+  keys.emplace_back(Slice(key_data.back()));
+  key_data.emplace_back(Key(50));
+  keys.emplace_back(Slice(key_data.back()));
+  statuses.resize(keys.size());
+
+  dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
+                     keys.data(), values.data(), statuses.data(), true);
+  ASSERT_TRUE(CheckValue(0, values[0].ToString()));
+  //ASSERT_TRUE(CheckValue(50, values[1].ToString()));
+  ASSERT_EQ(statuses[0], Status::OK());
+  ASSERT_EQ(statuses[1], Status::Corruption());
+
+  SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_P(DBBasicTestWithParallelIO, MultiGetWithMissingFile) {
+  std::vector<std::string> key_data(10);
+  std::vector<Slice> keys;
+  // We cannot resize a PinnableSlice vector, so just set initial size to
+  // largest we think we will need
+  std::vector<PinnableSlice> values(10);
+  std::vector<Status> statuses;
+  ReadOptions ro;
+  ro.fill_cache = fill_cache();
+
+  SyncPoint::GetInstance()->SetCallBack(
+      "TableCache::MultiGet:FindTable", [&](void *status) {
+        Status* s = static_cast<Status*>(status);
+        *s = Status::IOError();
+      });
+  // DB open will create table readers unless we reduce the table cache
+  // capacity.
+  // SanitizeOptions will set max_open_files to minimum of 20. Table cache
+  // is allocated with max_open_files - 10 as capacity. So override
+  // max_open_files to 11 so table cache capacity will become 1. This will
+  // prevent file open during DB open and force the file to be opened
+  // during MultiGet
+  SyncPoint::GetInstance()->SetCallBack(
+      "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void *arg) {
+        int* max_open_files = (int*)arg;
+        *max_open_files = 11;
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+
+  Reopen(CurrentOptions());
+
+  // Warm up the cache first
+  key_data.emplace_back(Key(0));
+  keys.emplace_back(Slice(key_data.back()));
+  key_data.emplace_back(Key(50));
+  keys.emplace_back(Slice(key_data.back()));
+  statuses.resize(keys.size());
+
+  dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
+                     keys.data(), values.data(), statuses.data(), true);
+  ASSERT_EQ(statuses[0], Status::IOError());
+  ASSERT_EQ(statuses[1], Status::IOError());
+
+  SyncPoint::GetInstance()->DisableProcessing();
+}
+
 INSTANTIATE_TEST_CASE_P(
     ParallelIO, DBBasicTestWithParallelIO,
     // Params are as follows -
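Both tests above rely on RocksDB's SyncPoint facility to inject failures at a named callback. A condensed, hedged sketch of that pattern (the callback name is the one registered in db/table_cache.cc later in this diff):

rocksdb::SyncPoint::GetInstance()->SetCallBack(
    "TableCache::MultiGet:FindTable", [](void* status) {
      // The production code hands its Status* to the sync point;
      // overwrite it to simulate a failed table open.
      *static_cast<rocksdb::Status*>(status) = rocksdb::Status::IOError();
    });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
// ... exercise MultiGet(); it now observes the injected IOError ...
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();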
db/db_impl/db_impl.cc

@@ -4495,13 +4495,21 @@ Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
     uint64_t oldest_time = port::kMaxUint64;
     for (auto cfd : *versions_->GetColumnFamilySet()) {
-      uint64_t ctime;
-      cfd->current()->GetCreationTimeOfOldestFile(&ctime);
-      if (ctime < oldest_time) {
-        oldest_time = ctime;
-      }
-      if (oldest_time == 0) {
-        break;
+      if (!cfd->IsDropped()) {
+        uint64_t ctime;
+        {
+          SuperVersion* sv = GetAndRefSuperVersion(cfd);
+          Version* version = sv->current;
+          version->GetCreationTimeOfOldestFile(&ctime);
+          ReturnAndCleanupSuperVersion(cfd, sv);
+        }
+
+        if (ctime < oldest_time) {
+          oldest_time = ctime;
+        }
+        if (oldest_time == 0) {
+          break;
+        }
       }
     }
     *creation_time = oldest_time;
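For context, the public API whose data race the hunk above fixes can be driven as in this hedged sketch (assumes an open rocksdb::DB* named db, opened with max_open_files == -1, the branch guarded above):

uint64_t creation_time = 0;
rocksdb::Status s = db->GetCreationTimeOfOldestFile(&creation_time);
if (s.ok()) {
  // creation_time is the creation time of the oldest SST across all
  // non-dropped column families; 0 means at least one file predates
  // creation-time tracking.
}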
db/db_test2.cc (104 changes)

@@ -4300,110 +4300,6 @@ TEST_F(DBTest2, SameSmallestInSameLevel) {

   ASSERT_EQ("2,3,4,5,6,7,8", Get("key"));
 }

-TEST_F(DBTest2, BlockBasedTablePrefixIndexSeekForPrev) {
-  // create a DB with block prefix index
-  BlockBasedTableOptions table_options;
-  Options options = CurrentOptions();
-  table_options.block_size = 300;
-  table_options.index_type = BlockBasedTableOptions::kHashSearch;
-  table_options.index_shortening =
-      BlockBasedTableOptions::IndexShorteningMode::kNoShortening;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-
-  Reopen(options);
-
-  Random rnd(301);
-  std::string large_value = RandomString(&rnd, 500);
-
-  ASSERT_OK(Put("a1", large_value));
-  ASSERT_OK(Put("x1", large_value));
-  ASSERT_OK(Put("y1", large_value));
-  Flush();
-
-  {
-    std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
-    iterator->SeekForPrev("x3");
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_EQ("x1", iterator->key().ToString());
-
-    iterator->SeekForPrev("a3");
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_EQ("a1", iterator->key().ToString());
-
-    iterator->SeekForPrev("y3");
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_EQ("y1", iterator->key().ToString());
-
-    // Query more than one non-existing prefix to cover the case both
-    // of empty hash bucket and hash bucket conflict.
-    iterator->SeekForPrev("b1");
-    // Result should be not valid or "a1".
-    if (iterator->Valid()) {
-      ASSERT_EQ("a1", iterator->key().ToString());
-    }
-
-    iterator->SeekForPrev("c1");
-    // Result should be not valid or "a1".
-    if (iterator->Valid()) {
-      ASSERT_EQ("a1", iterator->key().ToString());
-    }
-
-    iterator->SeekForPrev("d1");
-    // Result should be not valid or "a1".
-    if (iterator->Valid()) {
-      ASSERT_EQ("a1", iterator->key().ToString());
-    }
-
-    iterator->SeekForPrev("y3");
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_EQ("y1", iterator->key().ToString());
-  }
-}
-
-TEST_F(DBTest2, BlockBasedTablePrefixGetIndexNotFound) {
-  // create a DB with block prefix index
-  BlockBasedTableOptions table_options;
-  Options options = CurrentOptions();
-  table_options.block_size = 300;
-  table_options.index_type = BlockBasedTableOptions::kHashSearch;
-  table_options.index_shortening =
-      BlockBasedTableOptions::IndexShorteningMode::kNoShortening;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-  options.level0_file_num_compaction_trigger = 8;
-
-  Reopen(options);
-
-  ASSERT_OK(Put("b1", "ok"));
-  Flush();
-
-  // Flushing several files so that the chance that hash bucket
-  // is empty fo "b" in at least one of the files is high.
-  ASSERT_OK(Put("a1", ""));
-  ASSERT_OK(Put("c1", ""));
-  Flush();
-
-  ASSERT_OK(Put("a2", ""));
-  ASSERT_OK(Put("c2", ""));
-  Flush();
-
-  ASSERT_OK(Put("a3", ""));
-  ASSERT_OK(Put("c3", ""));
-  Flush();
-
-  ASSERT_OK(Put("a4", ""));
-  ASSERT_OK(Put("c4", ""));
-  Flush();
-
-  ASSERT_OK(Put("a5", ""));
-  ASSERT_OK(Put("c5", ""));
-  Flush();
-
-  ASSERT_EQ("ok", Get("b1"));
-}
-
 } // namespace rocksdb

 #ifdef ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS
db/error_handler.cc

@@ -166,12 +166,6 @@ Status ErrorHandler::SetBGError(const Status& bg_err, BackgroundErrorReason reas
     return Status::OK();
   }

-  // Check if recovery is currently in progress. If it is, we will save this
-  // error so we can check it at the end to see if recovery succeeded or not
-  if (recovery_in_prog_ && recovery_error_.ok()) {
-    recovery_error_ = bg_err;
-  }
-
   bool paranoid = db_options_.paranoid_checks;
   Status::Severity sev = Status::Severity::kFatalError;
   Status new_bg_err;

@@ -204,10 +198,15 @@ Status ErrorHandler::SetBGError(const Status& bg_err, BackgroundErrorReason reas

   new_bg_err = Status(bg_err, sev);

+  // Check if recovery is currently in progress. If it is, we will save this
+  // error so we can check it at the end to see if recovery succeeded or not
+  if (recovery_in_prog_ && recovery_error_.ok()) {
+    recovery_error_ = new_bg_err;
+  }
+
   bool auto_recovery = auto_recovery_;
   if (new_bg_err.severity() >= Status::Severity::kFatalError && auto_recovery) {
     auto_recovery = false;
     ;
   }

   // Allow some error specific overrides
db/error_handler_test.cc

@@ -22,6 +22,21 @@ namespace rocksdb {
 class DBErrorHandlingTest : public DBTestBase {
  public:
  DBErrorHandlingTest() : DBTestBase("/db_error_handling_test") {}
+
+  std::string GetManifestNameFromLiveFiles() {
+    std::vector<std::string> live_files;
+    uint64_t manifest_size;
+
+    dbfull()->GetLiveFiles(live_files, &manifest_size, false);
+    for (auto& file : live_files) {
+      uint64_t num = 0;
+      FileType type;
+      if (ParseFileName(file, &num, &type) && type == kDescriptorFile) {
+        return file;
+      }
+    }
+    return "";
+  }
 };

 class DBErrorHandlingEnv : public EnvWrapper {

@@ -161,6 +176,169 @@ TEST_F(DBErrorHandlingTest, FLushWriteError) {
   Destroy(options);
 }

+TEST_F(DBErrorHandlingTest, ManifestWriteError) {
+  std::unique_ptr<FaultInjectionTestEnv> fault_env(
+      new FaultInjectionTestEnv(Env::Default()));
+  std::shared_ptr<ErrorHandlerListener> listener(new ErrorHandlerListener());
+  Options options = GetDefaultOptions();
+  options.create_if_missing = true;
+  options.env = fault_env.get();
+  options.listeners.emplace_back(listener);
+  Status s;
+  std::string old_manifest;
+  std::string new_manifest;
+
+  listener->EnableAutoRecovery(false);
+  DestroyAndReopen(options);
+  old_manifest = GetManifestNameFromLiveFiles();
+
+  Put(Key(0), "val");
+  Flush();
+  Put(Key(1), "val");
+  SyncPoint::GetInstance()->SetCallBack(
+      "VersionSet::LogAndApply:WriteManifest", [&](void *) {
+        fault_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+  s = Flush();
+  ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kHardError);
+  SyncPoint::GetInstance()->ClearAllCallBacks();
+  SyncPoint::GetInstance()->DisableProcessing();
+  fault_env->SetFilesystemActive(true);
+  s = dbfull()->Resume();
+  ASSERT_EQ(s, Status::OK());
+
+  new_manifest = GetManifestNameFromLiveFiles();
+  ASSERT_NE(new_manifest, old_manifest);
+
+  Reopen(options);
+  ASSERT_EQ("val", Get(Key(0)));
+  ASSERT_EQ("val", Get(Key(1)));
+  Close();
+}
+
+TEST_F(DBErrorHandlingTest, DoubleManifestWriteError) {
+  std::unique_ptr<FaultInjectionTestEnv> fault_env(
+      new FaultInjectionTestEnv(Env::Default()));
+  std::shared_ptr<ErrorHandlerListener> listener(new ErrorHandlerListener());
+  Options options = GetDefaultOptions();
+  options.create_if_missing = true;
+  options.env = fault_env.get();
+  options.listeners.emplace_back(listener);
+  Status s;
+  std::string old_manifest;
+  std::string new_manifest;
+
+  listener->EnableAutoRecovery(false);
+  DestroyAndReopen(options);
+  old_manifest = GetManifestNameFromLiveFiles();
+
+  Put(Key(0), "val");
+  Flush();
+  Put(Key(1), "val");
+  SyncPoint::GetInstance()->SetCallBack(
+      "VersionSet::LogAndApply:WriteManifest", [&](void *) {
+        fault_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
+      });
+  SyncPoint::GetInstance()->EnableProcessing();
+  s = Flush();
+  ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kHardError);
+  fault_env->SetFilesystemActive(true);
+
+  // This Resume() will attempt to create a new manifest file and fail again
+  s = dbfull()->Resume();
+  ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kHardError);
+  fault_env->SetFilesystemActive(true);
+  SyncPoint::GetInstance()->ClearAllCallBacks();
+  SyncPoint::GetInstance()->DisableProcessing();
+
+  // A successful Resume() will create a new manifest file
+  s = dbfull()->Resume();
+  ASSERT_EQ(s, Status::OK());
+
+  new_manifest = GetManifestNameFromLiveFiles();
+  ASSERT_NE(new_manifest, old_manifest);
+
+  Reopen(options);
+  ASSERT_EQ("val", Get(Key(0)));
+  ASSERT_EQ("val", Get(Key(1)));
+  Close();
+}
+
+TEST_F(DBErrorHandlingTest, CompactionManifestWriteError) {
+  std::unique_ptr<FaultInjectionTestEnv> fault_env(
+      new FaultInjectionTestEnv(Env::Default()));
+  std::shared_ptr<ErrorHandlerListener> listener(new ErrorHandlerListener());
+  Options options = GetDefaultOptions();
+  options.create_if_missing = true;
+  options.level0_file_num_compaction_trigger = 2;
+  options.listeners.emplace_back(listener);
+  options.env = fault_env.get();
+  Status s;
+  std::string old_manifest;
+  std::string new_manifest;
+  std::atomic<bool> fail_manifest(false);
+  DestroyAndReopen(options);
+  old_manifest = GetManifestNameFromLiveFiles();
+
+  Put(Key(0), "val");
+  Put(Key(2), "val");
+  s = Flush();
+  ASSERT_EQ(s, Status::OK());
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency(
+      // Wait for flush of 2nd L0 file before starting compaction
+      {{"DBImpl::FlushMemTable:FlushMemTableFinished",
+        "BackgroundCallCompaction:0"},
+       // Wait for compaction to detect manifest write error
+       {"BackgroundCallCompaction:1", "CompactionManifestWriteError:0"},
+       // Make compaction thread wait for error to be cleared
+       {"CompactionManifestWriteError:1",
+        "DBImpl::BackgroundCallCompaction:FoundObsoleteFiles"},
+       // Wait for DB instance to clear bg_error before calling
+       // TEST_WaitForCompact
+       {"SstFileManagerImpl::ClearError",
+        "CompactionManifestWriteError:2"}});
+  // trigger manifest write failure in compaction thread
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "BackgroundCallCompaction:0", [&](void *) {
+        fail_manifest.store(true);
+      });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "VersionSet::LogAndApply:WriteManifest", [&](void *) {
+        if (fail_manifest.load()) {
+          fault_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Put(Key(1), "val");
+  // This Flush will trigger a compaction, which will fail when appending to
+  // the manifest
+  s = Flush();
+  ASSERT_EQ(s, Status::OK());
+
+  TEST_SYNC_POINT("CompactionManifestWriteError:0");
+  // Clear all errors so when the compaction is retried, it will succeed
+  fault_env->SetFilesystemActive(true);
+  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
+  TEST_SYNC_POINT("CompactionManifestWriteError:1");
+  TEST_SYNC_POINT("CompactionManifestWriteError:2");
+
+  s = dbfull()->TEST_WaitForCompact();
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  ASSERT_EQ(s, Status::OK());
+
+  new_manifest = GetManifestNameFromLiveFiles();
+  ASSERT_NE(new_manifest, old_manifest);
+  Reopen(options);
+  ASSERT_EQ("val", Get(Key(0)));
+  ASSERT_EQ("val", Get(Key(1)));
+  ASSERT_EQ("val", Get(Key(2)));
+  Close();
+}
+
 TEST_F(DBErrorHandlingTest, CompactionWriteError) {
   std::unique_ptr<FaultInjectionTestEnv> fault_env(
       new FaultInjectionTestEnv(Env::Default()));
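The three new tests above share one fault-injection recipe. A hedged condensation (FaultInjectionTestEnv is a test-only Env wrapper; options is a rocksdb::Options being prepared for DB open):

std::unique_ptr<FaultInjectionTestEnv> fault_env(
    new FaultInjectionTestEnv(rocksdb::Env::Default()));
options.env = fault_env.get();
// Arm a sync point so the next MANIFEST append fails with "no space".
rocksdb::SyncPoint::GetInstance()->SetCallBack(
    "VersionSet::LogAndApply:WriteManifest", [&](void*) {
      fault_env->SetFilesystemActive(false,
                                     rocksdb::Status::NoSpace("Out of space"));
    });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
// Flush() now returns a kHardError-severity status. Re-enable the
// filesystem and call DB::Resume(); per the ProcessManifestWrites change
// later in this diff, a fresh MANIFEST is then started.
fault_env->SetFilesystemActive(true);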
db/table_cache.cc

@@ -485,6 +485,7 @@ Status TableCache::MultiGet(const ReadOptions& options,
         file_options_, internal_comparator, fd, &handle, prefix_extractor,
         options.read_tier == kBlockCacheTier /* no_io */,
         true /* record_read_stats */, file_read_hist, skip_filters, level);
+    TEST_SYNC_POINT_CALLBACK("TableCache::MultiGet:FindTable", &s);
     if (s.ok()) {
       t = GetTableReaderFromHandle(handle);
       assert(t);
db/version_set.cc

@@ -1918,6 +1918,11 @@ void Version::MultiGet(const ReadOptions& read_options, MultiGetRange* range,
         &iter->max_covering_tombstone_seq, this->env_, nullptr,
         merge_operator_ ? &pinned_iters_mgr : nullptr, callback, is_blob,
         tracing_mget_id);
+    // MergeInProgress status, if set, has been transferred to the get_context
+    // state, so we set status to ok here. From now on, the iter status will
+    // be used for IO errors, and get_context state will be used for any
+    // key level errors
+    *(iter->s) = Status::OK();
   }
   int get_ctx_index = 0;
   for (auto iter = range->begin(); iter != range->end();

@@ -1962,6 +1967,15 @@ void Version::MultiGet(const ReadOptions& read_options, MultiGetRange* range,
     for (auto iter = file_range.begin(); iter != file_range.end(); ++iter) {
       GetContext& get_context = *iter->get_context;
       Status* status = iter->s;
+      // The Status in the KeyContext takes precedence over GetContext state
+      // Status may be an error if there were any IO errors in the table
+      // reader. We never expect Status to be NotFound(), as that is
+      // determined by get_context
+      assert(!status->IsNotFound());
+      if (!status->ok()) {
+        file_range.MarkKeyDone(iter);
+        continue;
+      }

       if (get_context.sample()) {
         sample_file_read_inc(f->file_metadata);

@@ -3953,12 +3967,15 @@ Status VersionSet::ProcessManifestWrites(
     for (auto v : versions) {
       delete v;
     }
+    // If manifest append failed for whatever reason, the file could be
+    // corrupted. So we need to force the next version update to start a
+    // new manifest file.
+    descriptor_log_.reset();
     if (new_descriptor_log) {
       ROCKS_LOG_INFO(db_options_->info_log,
                      "Deleting manifest %" PRIu64 " current manifest %" PRIu64
                      "\n",
                      manifest_file_number_, pending_manifest_file_number_);
-      descriptor_log_.reset();
       env_->DeleteFile(
           DescriptorFileName(dbname_, pending_manifest_file_number_));
     }
@@ -62,7 +62,7 @@ void InitializeHotKeyGenerator(double alpha) {
 int64_t GetOneHotKeyID(double rand_seed, int64_t max_key) {
   int64_t low = 1, mid, high = zipf_sum_size, zipf = 0;
   while (low <= high) {
-    mid = std::floor((low + high) / 2);
+    mid = static_cast<int64_t>(std::floor((low + high) / 2));
     if (sum_probs[mid] >= rand_seed && sum_probs[mid - 1] < rand_seed) {
       zipf = mid;
       break;
@@ -771,7 +771,7 @@ std::vector<std::string> StressTest::GetWhiteBoxKeys(ThreadState* thread,
         k[i] = static_cast<char>(cur - 1);
         break;
       } else if (i > 0) {
-        k[i] = 0xFF;
+        k[i] = static_cast<char>(0xFF);
       }
     }
   } else if (thread->rand.OneIn(2)) {
env/env_test.cc (26 changes)

@@ -1130,8 +1130,25 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
     ASSERT_OK(wfile->Close());
   }

-  // Random Read
-  {
+  // More attempts to simulate more partial result sequences.
+  for (uint32_t attempt = 0; attempt < 20; attempt++) {
+    // Random Read
+    Random rnd(301 + attempt);
+    rocksdb::SyncPoint::GetInstance()->SetCallBack(
+        "PosixRandomAccessFile::MultiRead:io_uring_result", [&](void* arg) {
+          if (attempt > 0) {
+            // No failure in the first attempt.
+            size_t& bytes_read = *static_cast<size_t*>(arg);
+            if (rnd.OneIn(4)) {
+              bytes_read = 0;
+            } else if (rnd.OneIn(3)) {
+              bytes_read = static_cast<size_t>(
+                  rnd.Uniform(static_cast<int>(bytes_read)));
+            }
+          }
+        });
+    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
     std::unique_ptr<RandomAccessFile> file;
     std::vector<ReadRequest> reqs(3);
     std::vector<std::unique_ptr<char, Deleter>> data;

@@ -1156,6 +1173,7 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
       ASSERT_OK(reqs[i].status);
       ASSERT_EQ(memcmp(reqs[i].scratch, buf.get(), kSectorSize), 0);
     }
+    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
   }
 }

@@ -1186,7 +1204,7 @@ TEST_P(EnvPosixTestWithParam, InvalidateCache) {
     ASSERT_OK(wfile->Append(slice));
     ASSERT_OK(wfile->InvalidateCache(0, 0));
     ASSERT_OK(wfile->Close());
-    }
+  }

   // Random Read
   {

@@ -1430,7 +1448,7 @@ TEST_P(EnvPosixTestWithParam, ConsistentChildrenAttributes) {
       Slice buf(buf_ptr.get(), data.size());
       file->Append(buf);
       data.append(std::string(4096, 'T'));
-    }
+  }

   std::vector<Env::FileAttributes> file_attrs;
   ASSERT_OK(env_->GetChildrenFileAttributes(test::TmpDir(env_), &file_attrs));
env/io_posix.cc (80 changes)

@@ -482,9 +482,6 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs,
                                           const IOOptions& options,
                                           IODebugContext* dbg) {
 #if defined(ROCKSDB_IOURING_PRESENT)
-  size_t reqs_off;
-  ssize_t ret __attribute__((__unused__));
-
   struct io_uring* iu = nullptr;
   if (thread_local_io_urings_) {
     iu = static_cast<struct io_uring*>(thread_local_io_urings_->Get());

@@ -505,35 +502,49 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs,
   struct WrappedReadRequest {
     FSReadRequest* req;
     struct iovec iov;
-    explicit WrappedReadRequest(FSReadRequest* r) : req(r) {}
+    size_t finished_len;
+    explicit WrappedReadRequest(FSReadRequest* r) : req(r), finished_len(0) {}
   };

   autovector<WrappedReadRequest, 32> req_wraps;
+  autovector<WrappedReadRequest*, 4> incomplete_rq_list;

   for (size_t i = 0; i < num_reqs; i++) {
     req_wraps.emplace_back(&reqs[i]);
   }

-  reqs_off = 0;
-  while (num_reqs) {
-    size_t this_reqs = num_reqs;
+  size_t reqs_off = 0;
+  while (num_reqs > reqs_off || !incomplete_rq_list.empty()) {
+    size_t this_reqs = (num_reqs - reqs_off) + incomplete_rq_list.size();

     // If requests exceed depth, split it into batches
     if (this_reqs > kIoUringDepth) this_reqs = kIoUringDepth;

+    assert(incomplete_rq_list.size() <= this_reqs);
     for (size_t i = 0; i < this_reqs; i++) {
-      size_t index = i + reqs_off;
+      WrappedReadRequest* rep_to_submit;
+      if (i < incomplete_rq_list.size()) {
+        rep_to_submit = incomplete_rq_list[i];
+      } else {
+        rep_to_submit = &req_wraps[reqs_off++];
+      }
+      assert(rep_to_submit->req->len > rep_to_submit->finished_len);
+      rep_to_submit->iov.iov_base =
+          rep_to_submit->req->scratch + rep_to_submit->finished_len;
+      rep_to_submit->iov.iov_len =
+          rep_to_submit->req->len - rep_to_submit->finished_len;
+
       struct io_uring_sqe* sqe;

       sqe = io_uring_get_sqe(iu);
-      req_wraps[index].iov.iov_base = reqs[index].scratch;
-      req_wraps[index].iov.iov_len = reqs[index].len;
-      io_uring_prep_readv(sqe, fd_, &req_wraps[index].iov, 1,
-                          reqs[index].offset);
-      io_uring_sqe_set_data(sqe, &req_wraps[index]);
+      io_uring_prep_readv(
+          sqe, fd_, &rep_to_submit->iov, 1,
+          rep_to_submit->req->offset + rep_to_submit->finished_len);
+      io_uring_sqe_set_data(sqe, rep_to_submit);
     }
+    incomplete_rq_list.clear();

-    ret = io_uring_submit_and_wait(iu, static_cast<unsigned int>(this_reqs));
+    ssize_t ret =
+        io_uring_submit_and_wait(iu, static_cast<unsigned int>(this_reqs));
     if (static_cast<size_t>(ret) != this_reqs) {
       fprintf(stderr, "ret = %ld this_reqs: %ld\n", (long)ret, (long)this_reqs);
     }

@@ -547,21 +558,44 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs,
       // of our initial wait not reaping all completions
       ret = io_uring_wait_cqe(iu, &cqe);
       assert(!ret);

       req_wrap = static_cast<WrappedReadRequest*>(io_uring_cqe_get_data(cqe));
       FSReadRequest* req = req_wrap->req;
-      if (static_cast<size_t>(cqe->res) == req_wrap->iov.iov_len) {
-        req->result = Slice(req->scratch, cqe->res);
-        req->status = IOStatus::OK();
-      } else if (cqe->res >= 0) {
-        req->result = Slice(req->scratch, req_wrap->iov.iov_len - cqe->res);
-      } else {
+      if (cqe->res < 0) {
         req->result = Slice(req->scratch, 0);
         req->status = IOError("Req failed", filename_, cqe->res);
+      } else {
+        size_t bytes_read = static_cast<size_t>(cqe->res);
+        TEST_SYNC_POINT_CALLBACK(
+            "PosixRandomAccessFile::MultiRead:io_uring_result", &bytes_read);
+        if (bytes_read == req_wrap->iov.iov_len) {
+          req->result = Slice(req->scratch, req->len);
+          req->status = IOStatus::OK();
+        } else if (bytes_read == 0) {
+          // cqe->res == 0 can means EOF, or can mean partial results. See
+          // comment
+          // https://github.com/facebook/rocksdb/pull/6441#issuecomment-589843435
+          // Fall back to pread in this case.
+          Slice tmp_slice;
+          req->status =
+              Read(req->offset + req_wrap->finished_len,
+                   req->len - req_wrap->finished_len, options, &tmp_slice,
+                   req->scratch + req_wrap->finished_len, dbg);
+          req->result =
+              Slice(req->scratch, req_wrap->finished_len + tmp_slice.size());
+        } else if (bytes_read < req_wrap->iov.iov_len) {
+          assert(bytes_read > 0);
+          assert(bytes_read + req_wrap->finished_len < req->len);
+          req_wrap->finished_len += bytes_read;
+          incomplete_rq_list.push_back(req_wrap);
+        } else {
+          req->result = Slice(req->scratch, 0);
+          req->status = IOError("Req returned more bytes than requested",
+                                filename_, cqe->res);
+        }
       }
       io_uring_cqe_seen(iu, cqe);
     }
-    num_reqs -= this_reqs;
-    reqs_off += this_reqs;
   }
   return IOStatus::OK();
 #else
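The new branches above deal with short reads from io_uring by resubmitting the remainder (the finished_len/incomplete_rq_list bookkeeping) or falling back to pread. The classic synchronous form of the same invariant, as a hedged standalone sketch (not RocksDB code):

#include <cerrno>
#include <unistd.h>

// Loop a positional read until the buffer is full, EOF, or a hard error.
ssize_t ReadFully(int fd, char* buf, size_t len, off_t offset) {
  size_t done = 0;
  while (done < len) {
    ssize_t r = pread(fd, buf + done, len - done, offset + done);
    if (r < 0) {
      if (errno == EINTR) continue;  // interrupted; retry
      return -1;                     // hard error
    }
    if (r == 0) break;               // EOF: the partial result is final
    done += static_cast<size_t>(r);  // short read: advance and reissue
  }
  return static_cast<ssize_t>(done);
}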
file/sst_file_manager_impl.cc

@@ -308,6 +308,7 @@ void SstFileManagerImpl::ClearError() {
     // since the ErrorHandler::recovery_in_prog_ flag would be true
     cur_instance_ = error_handler;
     mu_.Unlock();
+    TEST_SYNC_POINT("SstFileManagerImpl::ClearError");
     s = error_handler->RecoverFromBGError();
     mu_.Lock();
     // The DB instance might have been deleted while we were
include/rocksdb/version.h

@@ -6,7 +6,7 @@

 #define ROCKSDB_MAJOR 6
 #define ROCKSDB_MINOR 7
-#define ROCKSDB_PATCH 0
+#define ROCKSDB_PATCH 3

 // Do not use these. We made the mistake of declaring macros starting with
 // double underscore. Now we have to live with our choice. We'll deprecate these
|
||||
file(MAKE_DIRECTORY mkdir ${JAVA_TEST_LIBDIR})
|
||||
endif()
|
||||
|
||||
if (DEFINED CUSTOM_REPO_URL)
|
||||
set(SEARCH_REPO_URL ${CUSTOM_REPO_URL}/)
|
||||
set(CENTRAL_REPO_URL ${CUSTOM_REPO_URL}/)
|
||||
if (DEFINED CUSTOM_DEPS_URL)
|
||||
set(DEPS_URL ${CUSTOM_DEPS_URL}/)
|
||||
else ()
|
||||
set(SEARCH_REPO_URL "http://search.maven.org/remotecontent?filepath=")
|
||||
set(CENTRAL_REPO_URL "https://repo1.maven.org/maven2/")
|
||||
# Using a Facebook AWS account for S3 storage. (maven.org has a history
|
||||
# of failing in Travis builds.)
|
||||
set(DEPS_URL "https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars")
|
||||
endif()
|
||||
|
||||
if(NOT EXISTS ${JAVA_JUNIT_JAR})
|
||||
message("Downloading ${JAVA_JUNIT_JAR}")
|
||||
file(DOWNLOAD ${SEARCH_REPO_URL}junit/junit/4.12/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
file(DOWNLOAD ${DEPS_URL}/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
list(GET downloadStatus 0 error_code)
|
||||
if(NOT error_code EQUAL 0)
|
||||
message(FATAL_ERROR "Failed downloading ${JAVA_JUNIT_JAR}")
|
||||
@ -332,7 +332,7 @@ if(NOT EXISTS ${JAVA_JUNIT_JAR})
|
||||
endif()
|
||||
if(NOT EXISTS ${JAVA_HAMCR_JAR})
|
||||
message("Downloading ${JAVA_HAMCR_JAR}")
|
||||
file(DOWNLOAD ${SEARCH_REPO_URL}org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
file(DOWNLOAD ${DEPS_URL}/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
list(GET downloadStatus 0 error_code)
|
||||
if(NOT error_code EQUAL 0)
|
||||
message(FATAL_ERROR "Failed downloading ${JAVA_HAMCR_JAR}")
|
||||
@ -341,7 +341,7 @@ if(NOT EXISTS ${JAVA_HAMCR_JAR})
|
||||
endif()
|
||||
if(NOT EXISTS ${JAVA_MOCKITO_JAR})
|
||||
message("Downloading ${JAVA_MOCKITO_JAR}")
|
||||
file(DOWNLOAD ${SEARCH_REPO_URL}org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
file(DOWNLOAD ${DEPS_URL}/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
list(GET downloadStatus 0 error_code)
|
||||
if(NOT error_code EQUAL 0)
|
||||
message(FATAL_ERROR "Failed downloading ${JAVA_MOCKITO_JAR}")
|
||||
@ -350,7 +350,7 @@ if(NOT EXISTS ${JAVA_MOCKITO_JAR})
|
||||
endif()
|
||||
if(NOT EXISTS ${JAVA_CGLIB_JAR})
|
||||
message("Downloading ${JAVA_CGLIB_JAR}")
|
||||
file(DOWNLOAD ${SEARCH_REPO_URL}cglib/cglib/2.2.2/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
file(DOWNLOAD ${DEPS_URL}/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
list(GET downloadStatus 0 error_code)
|
||||
if(NOT error_code EQUAL 0)
|
||||
message(FATAL_ERROR "Failed downloading ${JAVA_CGLIB_JAR}")
|
||||
@ -359,7 +359,7 @@ if(NOT EXISTS ${JAVA_CGLIB_JAR})
|
||||
endif()
|
||||
if(NOT EXISTS ${JAVA_ASSERTJ_JAR})
|
||||
message("Downloading ${JAVA_ASSERTJ_JAR}")
|
||||
file(DOWNLOAD ${CENTRAL_REPO_URL}org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
file(DOWNLOAD ${DEPS_URL}/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
|
||||
list(GET downloadStatus 0 error_code)
|
||||
if(NOT error_code EQUAL 0)
|
||||
message(FATAL_ERROR "Failed downloading ${JAVA_ASSERTJ_JAR}")
|
||||
|
java/Makefile

@@ -213,16 +213,21 @@ ifneq ($(DEBUG_LEVEL),0)
 	JAVAC_ARGS = -Xlint:deprecation -Xlint:unchecked
 endif

-SEARCH_REPO_URL?=http://search.maven.org/remotecontent?filepath=
-CENTRAL_REPO_URL?=https://repo1.maven.org/maven2/
+# Using a Facebook AWS account for S3 storage. (maven.org has a history
+# of failing in Travis builds.)
+DEPS_URL?=https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars

-clean:
-	$(AM_V_at)rm -rf include/*
-	$(AM_V_at)rm -rf test-libs/
+clean: clean-not-downloaded clean-downloaded
+
+clean-not-downloaded:
+	$(AM_V_at)rm -rf $(NATIVE_INCLUDE)
 	$(AM_V_at)rm -rf $(OUTPUT)
 	$(AM_V_at)rm -rf $(BENCHMARK_OUTPUT)
 	$(AM_V_at)rm -rf $(SAMPLES_OUTPUT)

+clean-downloaded:
+	$(AM_V_at)rm -rf $(JAVA_TEST_LIBDIR)
+

 javadocs: java
 	$(AM_V_GEN)mkdir -p $(JAVADOC)

@@ -279,11 +284,11 @@ optimistic_transaction_sample: java

 resolve_test_deps:
 	test -d "$(JAVA_TEST_LIBDIR)" || mkdir -p "$(JAVA_TEST_LIBDIR)"
-	test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_JUNIT_JAR) $(SEARCH_REPO_URL)junit/junit/4.12/junit-4.12.jar
-	test -s "$(JAVA_HAMCR_JAR)" || cp $(MVN_LOCAL)/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_HAMCR_JAR) $(SEARCH_REPO_URL)org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar
-	test -s "$(JAVA_MOCKITO_JAR)" || cp $(MVN_LOCAL)/org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_MOCKITO_JAR)" $(SEARCH_REPO_URL)org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar
-	test -s "$(JAVA_CGLIB_JAR)" || cp $(MVN_LOCAL)/cglib/cglib/2.2.2/cglib-2.2.2.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_CGLIB_JAR)" $(SEARCH_REPO_URL)cglib/cglib/2.2.2/cglib-2.2.2.jar
-	test -s "$(JAVA_ASSERTJ_JAR)" || cp $(MVN_LOCAL)/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_ASSERTJ_JAR)" $(CENTRAL_REPO_URL)org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar
+	test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output $(JAVA_JUNIT_JAR) --location $(DEPS_URL)/junit-4.12.jar
+	test -s "$(JAVA_HAMCR_JAR)" || cp $(MVN_LOCAL)/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output $(JAVA_HAMCR_JAR) --location $(DEPS_URL)/hamcrest-core-1.3.jar
+	test -s "$(JAVA_MOCKITO_JAR)" || cp $(MVN_LOCAL)/org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_MOCKITO_JAR)" --location $(DEPS_URL)/mockito-all-1.10.19.jar
+	test -s "$(JAVA_CGLIB_JAR)" || cp $(MVN_LOCAL)/cglib/cglib/2.2.2/cglib-2.2.2.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_CGLIB_JAR)" --location $(DEPS_URL)/cglib-2.2.2.jar
+	test -s "$(JAVA_ASSERTJ_JAR)" || cp $(MVN_LOCAL)/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_ASSERTJ_JAR)" --location $(DEPS_URL)/assertj-core-1.7.1.jar

 java_test: java resolve_test_deps
 	$(AM_V_GEN)mkdir -p $(TEST_CLASSES)
@ -390,19 +390,10 @@ void IndexBlockIter::Seek(const Slice& target) {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  status_ = Status::OK();
  uint32_t index = 0;
  bool ok = false;
  if (prefix_index_) {
    bool prefix_may_exist = true;
    ok = PrefixSeek(target, &index, &prefix_may_exist);
    if (!prefix_may_exist) {
      // This lets the caller distinguish between a non-existing prefix and
      // a seek key larger than the last key, both of which set Valid() to
      // false.
      current_ = restarts_;
      status_ = Status::NotFound();
    }
    ok = PrefixSeek(target, &index);
  } else if (value_delta_encoded_) {
    ok = BinarySeek<DecodeKeyV4>(seek_key, 0, num_restarts_ - 1, &index,
                                 comparator_);
@ -471,7 +462,6 @@ void IndexBlockIter::SeekToFirst() {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  status_ = Status::OK();
  SeekToRestartPoint(0);
  ParseNextIndexKey();
}
@ -490,7 +480,6 @@ void IndexBlockIter::SeekToLast() {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  status_ = Status::OK();
  SeekToRestartPoint(num_restarts_ - 1);
  while (ParseNextIndexKey() && NextEntryOffset() < restarts_) {
    // Keep skipping
@ -729,12 +718,8 @@ int IndexBlockIter::CompareBlockKey(uint32_t block_index, const Slice& target) {
// with a key >= target
bool IndexBlockIter::BinaryBlockIndexSeek(const Slice& target,
                                          uint32_t* block_ids, uint32_t left,
                                          uint32_t right, uint32_t* index,
                                          bool* prefix_may_exist) {
                                          uint32_t right, uint32_t* index) {
  assert(left <= right);
  assert(index);
  assert(prefix_may_exist);
  *prefix_may_exist = true;
  uint32_t left_bound = left;

  while (left <= right) {
@ -776,45 +760,14 @@ bool IndexBlockIter::BinaryBlockIndexSeek(const Slice& target,
    return true;
  } else {
    assert(left > right);

    // If the next block key is larger than the seek key, it is possible
    // that no key shares the prefix with `target`, or that all keys with
    // the same prefix as `target` are smaller than `target`. In the latter
    // case, we are mandated to set the position the same as the total
    // order seek. Then either:
    // (1) `target` falls into the range of the next block. In this case,
    //     we can place the iterator at the next block, or
    // (2) `target` is larger than all block keys. In this case we can
    //     keep the iterator invalidated without setting `prefix_may_exist`
    //     to false.
    // We might sometimes end up setting the total order position while no
    // key shares the prefix with `target`, but that still follows the
    // contract.
    uint32_t right_index = block_ids[right];
    assert(right_index + 1 <= num_restarts_);
    if (right_index + 1 < num_restarts_) {
      if (CompareBlockKey(right_index + 1, target) >= 0) {
        *index = right_index + 1;
        return true;
      } else {
        // We have to set the flag here because we are not positioning the
        // iterator at the total order position.
        *prefix_may_exist = false;
      }
    }

    // Mark iterator invalid
    current_ = restarts_;
    return false;
  }
}
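As the comment above BinaryBlockIndexSeek says, the goal is the first entry with a key >= target; the search itself is an ordinary lower-bound binary search over the candidate restart points. A minimal self-contained sketch of that pattern follows, with generic names (`keys`, `BinarySeekGE`) standing in for the RocksDB internals rather than quoting them:

#include <cstdint>
#include <string>
#include <vector>

// Sketch of the lower-bound search BinaryBlockIndexSeek performs over its
// candidate keys. `keys` stands in for the block keys reachable through
// `block_ids`; none of these names come from the diff itself.
static bool BinarySeekGE(const std::vector<std::string>& keys,
                         const std::string& target, uint32_t* index) {
  if (keys.empty()) {
    return false;
  }
  uint32_t left = 0;
  uint32_t right = static_cast<uint32_t>(keys.size());  // one past the end
  while (left < right) {
    uint32_t mid = left + (right - left) / 2;
    if (keys[mid].compare(target) < 0) {
      left = mid + 1;  // keys[mid] < target: answer lies after mid
    } else {
      right = mid;  // keys[mid] >= target: answer is mid or earlier
    }
  }
  if (left == keys.size()) {
    return false;  // every key < target: the iterator becomes invalid
  }
  *index = left;
  return true;
}

The removed branch in the hunk above handled the extra prefix-index wrinkle on top of this: when the search runs past the bucket's last block, the key may still live in the next block, or the prefix may not exist in the file at all.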
bool IndexBlockIter::PrefixSeek(const Slice& target, uint32_t* index,
                                bool* prefix_may_exist) {
  assert(index);
  assert(prefix_may_exist);
bool IndexBlockIter::PrefixSeek(const Slice& target, uint32_t* index) {
  assert(prefix_index_);
  *prefix_may_exist = true;
  Slice seek_key = target;
  if (!key_includes_seq_) {
    seek_key = ExtractUserKey(target);
@ -824,12 +777,9 @@ bool IndexBlockIter::PrefixSeek(const Slice& target, uint32_t* index,

  if (num_blocks == 0) {
    current_ = restarts_;
    *prefix_may_exist = false;
    return false;
  } else {
    assert(block_ids);
    return BinaryBlockIndexSeek(seek_key, block_ids, 0, num_blocks - 1, index,
                                prefix_may_exist);
    return BinaryBlockIndexSeek(seek_key, block_ids, 0, num_blocks - 1, index);
  }
}
@ -539,13 +539,6 @@ class IndexBlockIter final : public BlockIter<IndexValue> {
    }
  }

  // IndexBlockIter follows a different contract for prefix iterators than
  // data iterators do.
  // If the prefix of the seek key `target` exists in the file, Seek() must
  // return the same result as a total order seek.
  // If the prefix of `target` doesn't exist in the file, it can either
  // return the result of a total order seek, or set both Valid() = false
  // and status() = NotFound().
  virtual void Seek(const Slice& target) override;

  virtual void SeekForPrev(const Slice&) override {
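The contract described in the comment above is easiest to see from the public API. Below is a minimal end-to-end sketch, assuming a fixed-length prefix extractor and the hash-search index type; the database path, prefix length, and keys are illustrative only, not taken from this diff:

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Bucket keys by a 3-byte prefix; kHashSearch selects the index type
  // whose Seek() contract the comment above describes.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(3));
  rocksdb::BlockBasedTableOptions table_options;
  table_options.index_type = rocksdb::BlockBasedTableOptions::kHashSearch;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/hash_index_demo", &db);
  assert(s.ok());
  db->Put(rocksdb::WriteOptions(), "abc1", "v1");
  db->Put(rocksdb::WriteOptions(), "abc2", "v2");
  db->Flush(rocksdb::FlushOptions());

  rocksdb::ReadOptions read_options;
  read_options.prefix_same_as_start = true;
  rocksdb::Iterator* it = db->NewIterator(read_options);
  // An existing prefix must behave like a total order seek:
  for (it->Seek("abc"); it->Valid(); it->Next()) {
    // visits "abc1", then "abc2"
  }
  // A missing prefix may simply leave the iterator invalid; Valid() ==
  // false by itself is not an error here.
  it->Seek("zzz");
  assert(it->status().ok() || it->status().IsNotFound());
  delete it;
  delete db;
  return 0;
}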
@ -602,16 +595,9 @@ class IndexBlockIter final : public BlockIter<IndexValue> {

  std::unique_ptr<GlobalSeqnoState> global_seqno_state_;

  // Set *prefix_may_exist to false if no key possibly share the same prefix
  // as `target`. If not set, the result position should be the same as total
  // order Seek.
  bool PrefixSeek(const Slice& target, uint32_t* index, bool* prefix_may_exist);
  // Set *prefix_may_exist to false if no key can possibly share the same
  // prefix as `target`. If not set, the result position should be the same
  // as total order seek.
  bool PrefixSeek(const Slice& target, uint32_t* index);
  bool BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
                            uint32_t left, uint32_t right, uint32_t* index,
                            bool* prefix_may_exist);
                            uint32_t left, uint32_t right, uint32_t* index);
  inline int CompareBlockKey(uint32_t block_index, const Slice& target);

  inline int Compare(const Slice& a, const Slice& b) const {
@ -2455,6 +2455,7 @@ void BlockBasedTable::RetrieveMultipleBlocks(
        s = rocksdb::VerifyChecksum(footer.checksum(),
                                    req.result.data() + req_offset,
                                    handle.size() + 1, expected);
        TEST_SYNC_POINT_CALLBACK("RetrieveMultipleBlocks:VerifyChecksum", &s);
      }
    }
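The new sync point gives tests a hook to overwrite the checksum Status that RetrieveMultipleBlocks just computed. A sketch of how a test might use it, assuming RocksDB's internal sync-point harness (test builds only); the function name and injected status are made up for illustration:

#include "rocksdb/status.h"
#include "test_util/sync_point.h"  // internal, test-only header

// Sketch: force MultiGet's checksum verification to fail on demand by
// overwriting the Status passed through the sync point above.
void InjectChecksumFailureForTest() {
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "RetrieveMultipleBlocks:VerifyChecksum", [](void* arg) {
        auto* s = static_cast<rocksdb::Status*>(arg);
        *s = rocksdb::Status::Corruption("injected checksum mismatch");
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  // ... run the MultiGet under test, then clean up:
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
}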
@ -2901,22 +2902,12 @@ void BlockBasedTableIterator<TBlockIter, TValue>::SeekForPrev(
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    auto seek_status = index_iter_->status();
    // Check for IO error
    if (!seek_status.IsNotFound() && !seek_status.ok()) {
    if (!index_iter_->status().ok()) {
      ResetDataIter();
      return;
    }

    // With a prefix index, Seek() returns NotFound if the prefix doesn't exist
    if (seek_status.IsNotFound()) {
      // Any key less than the target is fine for prefix seek
      ResetDataIter();
      return;
    } else {
      index_iter_->SeekToLast();
    }
    // Check for IO error
    index_iter_->SeekToLast();
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
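The simplified path above leans on the plain SeekForPrev() contract: position on the last key <= target, and report real failures only through status(). A small public-API illustration of that contract (the helper name and keys are made up):

#include <cassert>
#include "rocksdb/db.h"

// Sketch: SeekForPrev() lands on the last key <= target, which is what the
// simplified index handling above relies on.
void SeekForPrevDemo(rocksdb::DB* db) {
  rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
  it->SeekForPrev("b");  // e.g. with keys {"a", "c"}, this lands on "a"
  if (it->Valid()) {
    assert(it->key().compare("b") <= 0);
  }
  assert(it->status().ok());  // IO errors surface here, not via Valid()
  delete it;
}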
@ -3467,7 +3458,7 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok() && !iiter->status().IsNotFound()) {
      if (s.ok()) {
        s = iiter->status();
      }
    }
@ -686,8 +686,7 @@ class BlockBasedTableIterator : public InternalIteratorBase<TValue> {
    return block_iter_.value();
  }
  Status status() const override {
    // The prefix index sets status to NotFound when the prefix does not exist
    if (!index_iter_->status().ok() && !index_iter_->status().IsNotFound()) {
    if (!index_iter_->status().ok()) {
      return index_iter_->status();
    } else if (block_iter_points_to_real_block_) {
      return block_iter_.status();
@ -1857,8 +1857,7 @@ void TableTest::IndexTest(BlockBasedTableOptions table_options) {
    auto key = prefixes[i] + "9";
    index_iter->Seek(InternalKey(key, 0, kTypeValue).Encode());

    ASSERT_TRUE(index_iter->status().ok() || index_iter->status().IsNotFound());
    ASSERT_TRUE(!index_iter->status().IsNotFound() || !index_iter->Valid());
    ASSERT_OK(index_iter->status());
    if (i == prefixes.size() - 1) {
      // last key
      ASSERT_TRUE(!index_iter->Valid());
@ -1885,19 +1884,6 @@ void TableTest::IndexTest(BlockBasedTableOptions table_options) {
      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0);
    }
  }
  for (const auto& prefix : non_exist_prefixes) {
    index_iter->SeekForPrev(InternalKey(prefix, 0, kTypeValue).Encode());
    // regular_iter->Seek(prefix);

    ASSERT_OK(index_iter->status());
    // Seeks to non-existing prefixes should yield either an invalid
    // iterator, or a key with a prefix greater than the target.
    if (index_iter->Valid()) {
      Slice ukey = ExtractUserKey(index_iter->key());
      Slice ukey_prefix = options.prefix_extractor->Transform(ukey);
      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) > 0);
    }
  }
  c.ResetTableReader();
}
@ -1,21 +1,30 @@
set(TOOLS
set(CORE_TOOLS
  sst_dump.cc
  db_sanity_test.cc
  write_stress.cc
  ldb.cc
  db_repl_stress.cc
  dump/rocksdb_dump.cc
  dump/rocksdb_undump.cc)
foreach(src ${TOOLS})
  ldb.cc)
foreach(src ${CORE_TOOLS})
  get_filename_component(exename ${src} NAME_WE)
  add_executable(${exename}${ARTIFACT_SUFFIX}
    ${src})
  target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_LIB})
  list(APPEND tool_deps ${exename})
  list(APPEND core_tool_deps ${exename})
endforeach()

list(APPEND tool_deps)
if(WITH_TOOLS)
  set(TOOLS
    db_sanity_test.cc
    write_stress.cc
    db_repl_stress.cc
    dump/rocksdb_dump.cc
    dump/rocksdb_undump.cc)
  foreach(src ${TOOLS})
    get_filename_component(exename ${src} NAME_WE)
    add_executable(${exename}${ARTIFACT_SUFFIX}
      ${src})
    target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_LIB})
    list(APPEND tool_deps ${exename})
  endforeach()

  add_custom_target(ldb_tests
    COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/ldb_tests.py
    DEPENDS ldb)
  add_custom_target(ldb_tests
    COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/ldb_tests.py
    DEPENDS ldb)
endif()