More Makefile Cleanup (#7097)
Summary: Cleans up some of the dependencies on test code in the Makefile while building tools:

- Moves test::RandomString and DBTestBase::RandomString into Random
- Moves test::RandomHumanReadableString into Random
- Moves the DestroyDir method into file_utils
- Moves SetupSyncPointsToMockDirectIO into sync_point
- Moves the FaultInjection Env and FS classes out of test_util and into utilities (utilities/fault_injection_env.cc and utilities/fault_injection_fs.cc)

These changes allow all of the tools to build without dependencies on test_util, thereby simplifying the build dependencies. By moving the FaultInjection code, the dependency in db_stress on different libraries for debug vs. release builds was eliminated.

Tested both release and debug builds via Make and CMake, for both static and shared libraries.

More work remains to clean up how the tools are built and to remove some unnecessary dependencies. There is also more work needed to get the Makefile and CMake builds to align: what goes into the libraries and the sizes of the executables currently differ between the two.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7097

Reviewed By: riversand963

Differential Revision: D22463160

Pulled By: pdillinger

fbshipit-source-id: e19462b53324ab3f0b7c72459dbc73165cc382b2
Parent: 82611ee25a
Commit: c7c7b07f06
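For callers, the cleanup is mostly a mechanical API move: the test:: random-string helpers become methods on Random (util/random.h), and the fault-injection Env/FS headers move from test_util/ into utilities/. Below is a minimal sketch of the before/after call sites; the function and namespace are illustrative only, while the Random methods and the util/random.h include are the ones that appear in the diff that follows.

```cpp
// A minimal sketch of the call-site migration implied by this change.
// It assumes it is built inside the RocksDB tree (ROCKSDB_NAMESPACE and
// util/random.h come from the repository).
#include <string>

#include "util/random.h"  // Random::RandomString / Random::HumanReadableString

namespace example {

std::string MakeTestValues() {
  ROCKSDB_NAMESPACE::Random rnd(301);

  // Before this change, tests reached into the test:: helpers from test_util:
  //   std::string r;
  //   test::RandomString(&rnd, 128, &r);
  //   std::string prefix = test::RandomHumanReadableString(&rnd, 8);

  // After: the generators are methods on Random itself, so tools and
  // benchmarks no longer need to link against the test utilities.
  std::string value = rnd.RandomString(128);        // arbitrary bytes
  std::string prefix = rnd.HumanReadableString(8);  // printable characters
  return prefix + value;
}

}  // namespace example
```

Dropping the test:: helpers is also what lets the Makefile hunks below remove $(TESTUTIL) from the tools, benchmark, and stress targets.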
@@ -751,6 +751,8 @@ set(SOURCES
 utilities/debug.cc
 utilities/env_mirror.cc
 utilities/env_timed.cc
+utilities/fault_injection_env.cc
+utilities/fault_injection_fs.cc
 utilities/leveldb_options/leveldb_options.cc
 utilities/memory/memory_util.cc
 utilities/merge_operators/bytesxor.cc

@@ -1172,8 +1174,6 @@ if(WITH_TESTS)
 db/db_test_util.cc
 monitoring/thread_status_updater_debug.cc
 table/mock_table.cc
-test_util/fault_injection_test_env.cc
-test_util/fault_injection_test_fs.cc
 utilities/cassandra/test_utils.cc
 )
 enable_testing()
Makefile (26 lines changed)

@@ -636,18 +636,12 @@ LIBRARY=$(SHARED1)
 TEST_LIBRARY=$(SHARED_TEST_LIBRARY)
 TOOLS_LIBRARY=$(SHARED_TOOLS_LIBRARY)
 STRESS_LIBRARY=$(SHARED_STRESS_LIBRARY)
-ifeq ($(DEBUG_LEVEL),0)
-STRESS_LIBRARY_RUNTIME_DEPS=$(SHARED_TOOLS_LIBRARY)
-else
-STRESS_LIBRARY_RUNTIME_DEPS=$(SHARED_TEST_LIBRARY) $(SHARED_TOOLS_LIBRARY)
-endif
 CLOUD_LIBRARY=$(SHARED_CLOUD_LIBRARY)
 else
 LIBRARY=$(STATIC_LIBRARY)
 TEST_LIBRARY=$(STATIC_TEST_LIBRARY)
 TOOLS_LIBRARY=$(STATIC_TOOLS_LIBRARY)
 STRESS_LIBRARY=$(STATIC_STRESS_LIBRARY)
-STRESS_LIBRARY_RUNTIME_DEPS=
 endif

 ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)

@@ -1166,29 +1160,23 @@ $(STATIC_TEST_LIBRARY): $(TEST_OBJECTS)
 	$(AM_V_AR)rm -f $@ $(SHARED_TEST_LIBRARY)
 	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^

-$(STATIC_TOOLS_LIBRARY): $(BENCH_OBJECTS) $(TOOL_OBJECTS) $(TESTUTIL)
+$(STATIC_TOOLS_LIBRARY): $(BENCH_OBJECTS) $(TOOL_OBJECTS)
 	$(AM_V_AR)rm -f $@ $(SHARED_TOOLS_LIBRARY)
 	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^

-ifeq ($(DEBUG_LEVEL),0)
-$(STATIC_STRESS_LIBRARY): $(TESTUTIL) $(ANALYZE_OBJECTS) $(STRESS_OBJECTS)
+$(STATIC_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS)
 	$(AM_V_AR)rm -f $@ $(SHARED_STRESS_LIBRARY)
 	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^
-else
-$(STATIC_STRESS_LIBRARY): $(TEST_OBJECTS) $(ANALYZE_OBJECTS) $(STRESS_OBJECTS)
-	$(AM_V_AR)rm -f $@ $(SHARED_STRESS_LIBRARY)
-	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^
-endif

 $(SHARED_TEST_LIBRARY): $(TEST_OBJECTS) $(SHARED1)
 	$(AM_V_AR)rm -f $@ $(STATIC_TEST_LIBRARY)
 	$(AM_SHARE)

-$(SHARED_TOOLS_LIBRARY): $(TOOL_OBJECTS) $(TESTUTIL) $(SHARED1)
+$(SHARED_TOOLS_LIBRARY): $(TOOL_OBJECTS) $(SHARED1)
 	$(AM_V_AR)rm -f $@ $(STATIC_TOOLS_LIBRARY)
 	$(AM_SHARE)

-$(SHARED_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) $(STRESS_LIBRARY_RUNTIME_DEPS) $(SHARED1)
+$(SHARED_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) $(SHARED_TOOLS_LIBRARY) $(SHARED1)
 	$(AM_V_AR)rm -f $@ $(STATIC_STRESS_LIBRARY)
 	$(AM_SHARE)

@@ -1216,13 +1204,13 @@ cache_bench: $(OBJ_DIR)/cache/cache_bench.o $(LIBRARY)
 persistent_cache_bench: $(OBJ_DIR)/utilities/persistent_cache/persistent_cache_bench.o $(LIBRARY)
 	$(AM_LINK)

-memtablerep_bench: $(OBJ_DIR)/memtable/memtablerep_bench.o $(TESTUTIL) $(LIBRARY)
+memtablerep_bench: $(OBJ_DIR)/memtable/memtablerep_bench.o $(LIBRARY)
 	$(AM_LINK)

 filter_bench: $(OBJ_DIR)/util/filter_bench.o $(LIBRARY)
 	$(AM_LINK)

-db_stress: $(OBJ_DIR)/db_stress_tool/db_stress.o $(STRESS_LIBRARY) $(STRESS_LIBRARY_RUNTIME_DEPS) $(LIBRARY)
+db_stress: $(OBJ_DIR)/db_stress_tool/db_stress.o $(STRESS_LIBRARY) $(TOOLS_LIBRARY) $(LIBRARY)
 	$(AM_LINK)

 write_stress: $(OBJ_DIR)/tools/write_stress.o $(LIBRARY)

@@ -1231,7 +1219,7 @@ write_stress: $(OBJ_DIR)/tools/write_stress.o $(LIBRARY)
 db_sanity_test: $(OBJ_DIR)/tools/db_sanity_test.o $(LIBRARY)
 	$(AM_LINK)

-db_repl_stress: $(OBJ_DIR)/tools/db_repl_stress.o $(TESTUTIL) $(LIBRARY)
+db_repl_stress: $(OBJ_DIR)/tools/db_repl_stress.o $(LIBRARY)
 	$(AM_LINK)

 arena_test: $(OBJ_DIR)/memory/arena_test.o $(TEST_LIBRARY) $(LIBRARY)
TARGETS (4 lines changed)

@@ -332,6 +332,8 @@ cpp_library(
 "utilities/debug.cc",
 "utilities/env_mirror.cc",
 "utilities/env_timed.cc",
+"utilities/fault_injection_env.cc",
+"utilities/fault_injection_fs.cc",
 "utilities/leveldb_options/leveldb_options.cc",
 "utilities/memory/memory_util.cc",
 "utilities/merge_operators/bytesxor.cc",

@@ -385,8 +387,6 @@ cpp_library(
 srcs = [
 "db/db_test_util.cc",
 "table/mock_table.cc",
-"test_util/fault_injection_test_env.cc",
-"test_util/fault_injection_test_fs.cc",
 "test_util/testharness.cc",
 "test_util/testutil.cc",
 "tools/block_cache_analyzer/block_cache_trace_analyzer.cc",
@@ -22,26 +22,18 @@
 #include "rocksdb/env.h"
 #include "rocksdb/iterator.h"
 #include "rocksdb/utilities/object_registry.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
 #include "util/coding.h"
 #include "util/string_util.h"
+#include "utilities/fault_injection_env.h"
 #include "utilities/merge_operators.h"

 namespace ROCKSDB_NAMESPACE {

 static const int kValueSize = 1000;

-namespace {
-std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-}  // anonymous namespace
-
 // counts how many operations were performed
 class EnvCounter : public EnvWrapper {
 public:

@@ -109,11 +101,11 @@ class ColumnFamilyTestBase : public testing::Test {
 // preserves the implementation that was in place when all of the
 // magic values in this file were picked.
 *storage = std::string(kValueSize, ' ');
-return Slice(*storage);
 } else {
 Random r(k);
-return test::RandomString(&r, kValueSize, storage);
+*storage = r.RandomString(kValueSize);
 }
+return Slice(*storage);
 }

 void Build(int base, int n, int flush_every = 0) {

@@ -329,11 +321,11 @@ class ColumnFamilyTestBase : public testing::Test {
 // 10 bytes for key, rest is value
 if (!save) {
 ASSERT_OK(Put(cf, test::RandomKey(&rnd_, 11),
-              RandomString(&rnd_, key_value_size - 10)));
+              rnd_.RandomString(key_value_size - 10)));
 } else {
 std::string key = test::RandomKey(&rnd_, 11);
 keys_[cf].insert(key);
-ASSERT_OK(Put(cf, key, RandomString(&rnd_, key_value_size - 10)));
+ASSERT_OK(Put(cf, key, rnd_.RandomString(key_value_size - 10)));
 }
 }
 db_->FlushWAL(false);
@@ -13,6 +13,7 @@
 #include "test_util/testutil.h"
 #include "util/hash.h"
 #include "util/kv_map.h"
+#include "util/random.h"
 #include "util/string_util.h"
 #include "utilities/merge_operators.h"

@@ -342,12 +343,12 @@ TEST_P(ComparatorDBTest, SimpleSuffixReverseComparator) {
 std::vector<std::string> source_prefixes;
 // Randomly generate 5 prefixes
 for (int i = 0; i < 5; i++) {
-source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8));
+source_prefixes.push_back(rnd.HumanReadableString(8));
 }
 for (int j = 0; j < 20; j++) {
 int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size()));
 std::string key = source_prefixes[prefix_index] +
-                  test::RandomHumanReadableString(&rnd, rnd.Uniform(8));
+                  rnd.HumanReadableString(rnd.Uniform(8));
 source_strings.push_back(key);
 }
@@ -9,13 +9,13 @@

 #ifndef ROCKSDB_LITE

-#include "rocksdb/db.h"
-
 #include <errno.h>
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>

 #include <cinttypes>

 #include "db/db_impl/db_impl.h"
 #include "db/db_test_util.h"
 #include "db/log_format.h"

@@ -24,6 +24,7 @@
 #include "file/filename.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/table.h"
 #include "rocksdb/write_batch.h"

@@ -31,6 +32,7 @@
 #include "table/meta_blocks.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
 #include "util/string_util.h"

 namespace ROCKSDB_NAMESPACE {

@@ -219,11 +221,11 @@ class CorruptionTest : public testing::Test {
 // preserves the implementation that was in place when all of the
 // magic values in this file were picked.
 *storage = std::string(kValueSize, ' ');
-return Slice(*storage);
 } else {
 Random r(k);
-return test::RandomString(&r, kValueSize, storage);
+*storage = r.RandomString(kValueSize);
 }
+return Slice(*storage);
 }
 };
@@ -16,10 +16,11 @@
 #include "rocksdb/utilities/debug.h"
 #include "table/block_based/block_based_table_reader.h"
 #include "table/block_based/block_builder.h"
-#include "test_util/fault_injection_test_env.h"
 #if !defined(ROCKSDB_LITE)
 #include "test_util/sync_point.h"
 #endif
+#include "util/random.h"
+#include "utilities/fault_injection_env.h"
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/string_append/stringappend.h"

@@ -2040,7 +2041,7 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
 for (int i = 0; i < 100; ++i) {
 // Make the value compressible. A purely random string doesn't compress
 // and the resultant data block will not be compressed
-std::string value(RandomString(&rnd, 128) + zero_str);
+std::string value(rnd.RandomString(128) + zero_str);
 assert(Put(Key(i), value) == Status::OK());
 }
 Flush();

@@ -2430,7 +2431,7 @@ class DBBasicTestMultiGet : public DBTestBase {
 for (int i = 0; i < 100; ++i) {
 // Make the value compressible. A purely random string doesn't compress
 // and the resultant data block will not be compressed
-values_.emplace_back(RandomString(&rnd, 128) + zero_str);
+values_.emplace_back(rnd.RandomString(128) + zero_str);
 assert(((num_cfs == 1) ? Put(Key(i), values_[i])
                        : Put(cf, Key(i), values_[i])) == Status::OK());
 }

@@ -2442,7 +2443,7 @@ class DBBasicTestMultiGet : public DBTestBase {

 for (int i = 0; i < 100; ++i) {
 // block cannot gain space by compression
-uncompressable_values_.emplace_back(RandomString(&rnd, 256) + '\0');
+uncompressable_values_.emplace_back(rnd.RandomString(256) + '\0');
 std::string tmp_key = "a" + Key(i);
 assert(((num_cfs == 1) ? Put(tmp_key, uncompressable_values_[i])
                        : Put(cf, tmp_key, uncompressable_values_[i])) ==

@@ -3210,7 +3211,7 @@ TEST_F(DBBasicTest, PointLookupDeadline) {
 Random rnd(301);
 for (int i = 0; i < 400; ++i) {
 std::string key = "k" + ToString(i);
-Put(key, RandomString(&rnd, 100));
+Put(key, rnd.RandomString(100));
 }
 Flush();
@@ -7,10 +7,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include <cstdlib>

 #include "cache/lru_cache.h"
 #include "db/db_test_util.h"
 #include "port/stack_trace.h"
 #include "util/compression.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -764,7 +766,7 @@ TEST_F(DBBlockCacheTest, CompressedCache) {
 std::string str;
 for (int i = 0; i < num_iter; i++) {
 if (i % 4 == 0) {  // high compression ratio
-str = RandomString(&rnd, 1000);
+str = rnd.RandomString(1000);
 }
 values.push_back(str);
 ASSERT_OK(Put(1, Key(i), values[i]));

@@ -851,7 +853,7 @@ TEST_F(DBBlockCacheTest, CacheCompressionDict) {
 for (int i = 0; i < kNumFiles; ++i) {
 ASSERT_EQ(i, NumTableFilesAtLevel(0, 0));
 for (int j = 0; j < kNumEntriesPerFile; ++j) {
-std::string value = RandomString(&rnd, kNumBytesPerEntry);
+std::string value = rnd.RandomString(kNumBytesPerEntry);
 ASSERT_OK(Put(Key(j * kNumFiles + i), value.c_str()));
 }
 ASSERT_OK(Flush());
@ -14,9 +14,10 @@
|
|||||||
#include "rocksdb/experimental.h"
|
#include "rocksdb/experimental.h"
|
||||||
#include "rocksdb/sst_file_writer.h"
|
#include "rocksdb/sst_file_writer.h"
|
||||||
#include "rocksdb/utilities/convenience.h"
|
#include "rocksdb/utilities/convenience.h"
|
||||||
#include "test_util/fault_injection_test_env.h"
|
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
#include "util/concurrent_task_limiter_impl.h"
|
#include "util/concurrent_task_limiter_impl.h"
|
||||||
|
#include "util/random.h"
|
||||||
|
#include "utilities/fault_injection_env.h"
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
|
|
||||||
@ -295,7 +296,7 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTrigger) {
|
|||||||
const int kTestSize = kCDTKeysPerBuffer * 1024;
|
const int kTestSize = kCDTKeysPerBuffer * 1024;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
values.push_back(RandomString(&rnd, kCDTValueSize));
|
values.push_back(rnd.RandomString(kCDTValueSize));
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
@ -343,7 +344,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) {
|
|||||||
const int kTestSize = kCDTKeysPerBuffer;
|
const int kTestSize = kCDTKeysPerBuffer;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
values.push_back(RandomString(&rnd, kCDTValueSize));
|
values.push_back(rnd.RandomString(kCDTValueSize));
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -408,7 +409,7 @@ TEST_F(DBCompactionTest, SkipStatsUpdateTest) {
|
|||||||
const int kTestSize = kCDTKeysPerBuffer * 512;
|
const int kTestSize = kCDTKeysPerBuffer * 512;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
values.push_back(RandomString(&rnd, kCDTValueSize));
|
values.push_back(rnd.RandomString(kCDTValueSize));
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -555,7 +556,7 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) {
|
|||||||
const int kTestSize = kCDTKeysPerBuffer * 512;
|
const int kTestSize = kCDTKeysPerBuffer * 512;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
values.push_back(RandomString(&rnd, kCDTValueSize));
|
values.push_back(rnd.RandomString(kCDTValueSize));
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
@ -673,7 +674,7 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
|
|||||||
const int kTestSize = kCDTKeysPerBuffer * 512;
|
const int kTestSize = kCDTKeysPerBuffer * 512;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
values.push_back(RandomString(&rnd, kCDTValueSize));
|
values.push_back(rnd.RandomString(kCDTValueSize));
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
@ -736,7 +737,7 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
|
|||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
// Write 100KB (100 values, each 1K)
|
// Write 100KB (100 values, each 1K)
|
||||||
for (int i = 0; i < kNumKeysPerFile; i++) {
|
for (int i = 0; i < kNumKeysPerFile; i++) {
|
||||||
values.push_back(RandomString(&rnd, 990));
|
values.push_back(rnd.RandomString(990));
|
||||||
ASSERT_OK(Put(1, Key(i), values[i]));
|
ASSERT_OK(Put(1, Key(i), values[i]));
|
||||||
}
|
}
|
||||||
// put extra key to trigger flush
|
// put extra key to trigger flush
|
||||||
@ -748,7 +749,7 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
|
|||||||
// generate one more file in level-0, and should trigger level-0 compaction
|
// generate one more file in level-0, and should trigger level-0 compaction
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int i = 0; i < kNumKeysPerFile; i++) {
|
for (int i = 0; i < kNumKeysPerFile; i++) {
|
||||||
values.push_back(RandomString(&rnd, 990));
|
values.push_back(rnd.RandomString(990));
|
||||||
ASSERT_OK(Put(1, Key(i), values[i]));
|
ASSERT_OK(Put(1, Key(i), values[i]));
|
||||||
}
|
}
|
||||||
// put extra key to trigger flush
|
// put extra key to trigger flush
|
||||||
@ -867,7 +868,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) {
|
|||||||
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
|
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int i = 0; i < 80; i++) {
|
for (int i = 0; i < 80; i++) {
|
||||||
values.push_back(RandomString(&rnd, 100000));
|
values.push_back(rnd.RandomString(100000));
|
||||||
ASSERT_OK(Put(1, Key(i), values[i]));
|
ASSERT_OK(Put(1, Key(i), values[i]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1105,7 +1106,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int i = 0; i < num_keys; i++) {
|
for (int i = 0; i < num_keys; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1177,7 +1178,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
|
|||||||
std::map<int32_t, std::string> values;
|
std::map<int32_t, std::string> values;
|
||||||
for (size_t i = 0; i < ranges.size(); i++) {
|
for (size_t i = 0; i < ranges.size(); i++) {
|
||||||
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
|
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
|
||||||
values[j] = RandomString(&rnd, value_size);
|
values[j] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(j), values[j]));
|
ASSERT_OK(Put(Key(j), values[j]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1223,7 +1224,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
|
|||||||
};
|
};
|
||||||
for (size_t i = 0; i < ranges.size(); i++) {
|
for (size_t i = 0; i < ranges.size(); i++) {
|
||||||
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
|
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
|
||||||
values[j] = RandomString(&rnd, value_size);
|
values[j] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(j), values[j]));
|
ASSERT_OK(Put(Key(j), values[j]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1268,14 +1269,14 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) {
|
|||||||
|
|
||||||
// file 1 [0 => 300]
|
// file 1 [0 => 300]
|
||||||
for (int32_t i = 0; i <= 300; i++) {
|
for (int32_t i = 0; i <= 300; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 2 [600 => 700]
|
// file 2 [600 => 700]
|
||||||
for (int32_t i = 600; i <= 700; i++) {
|
for (int32_t i = 600; i <= 700; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1349,14 +1350,14 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
|
|||||||
|
|
||||||
// file 1 [0 => 100]
|
// file 1 [0 => 100]
|
||||||
for (int32_t i = 0; i < 100; i++) {
|
for (int32_t i = 0; i < 100; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 2 [100 => 300]
|
// file 2 [100 => 300]
|
||||||
for (int32_t i = 100; i < 300; i++) {
|
for (int32_t i = 100; i < 300; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1377,7 +1378,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
|
|||||||
|
|
||||||
// file 3 [ 0 => 200]
|
// file 3 [ 0 => 200]
|
||||||
for (int32_t i = 0; i < 200; i++) {
|
for (int32_t i = 0; i < 200; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1409,21 +1410,21 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
|
|||||||
TEST_SYNC_POINT("DBCompaction::ManualPartial:1");
|
TEST_SYNC_POINT("DBCompaction::ManualPartial:1");
|
||||||
// file 4 [300 => 400)
|
// file 4 [300 => 400)
|
||||||
for (int32_t i = 300; i <= 400; i++) {
|
for (int32_t i = 300; i <= 400; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 5 [400 => 500)
|
// file 5 [400 => 500)
|
||||||
for (int32_t i = 400; i <= 500; i++) {
|
for (int32_t i = 400; i <= 500; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 6 [500 => 600)
|
// file 6 [500 => 600)
|
||||||
for (int32_t i = 500; i <= 600; i++) {
|
for (int32_t i = 500; i <= 600; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
// Second non-trivial compaction is triggered
|
// Second non-trivial compaction is triggered
|
||||||
@ -1491,14 +1492,14 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
|
|||||||
|
|
||||||
// file 1 [0 => 100]
|
// file 1 [0 => 100]
|
||||||
for (int32_t i = 0; i < 100; i++) {
|
for (int32_t i = 0; i < 100; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 2 [100 => 300]
|
// file 2 [100 => 300]
|
||||||
for (int32_t i = 100; i < 300; i++) {
|
for (int32_t i = 100; i < 300; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1517,7 +1518,7 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
|
|||||||
|
|
||||||
// file 3 [ 0 => 200]
|
// file 3 [ 0 => 200]
|
||||||
for (int32_t i = 0; i < 200; i++) {
|
for (int32_t i = 0; i < 200; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1549,7 +1550,7 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
|
|||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
}
|
}
|
||||||
values[j] = RandomString(&rnd, value_size);
|
values[j] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(j), values[j]));
|
ASSERT_OK(Put(Key(j), values[j]));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1620,14 +1621,14 @@ TEST_F(DBCompactionTest, DeleteFileRange) {
|
|||||||
|
|
||||||
// file 1 [0 => 100]
|
// file 1 [0 => 100]
|
||||||
for (int32_t i = 0; i < 100; i++) {
|
for (int32_t i = 0; i < 100; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// file 2 [100 => 300]
|
// file 2 [100 => 300]
|
||||||
for (int32_t i = 100; i < 300; i++) {
|
for (int32_t i = 100; i < 300; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1643,7 +1644,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) {
|
|||||||
|
|
||||||
// file 3 [ 0 => 200]
|
// file 3 [ 0 => 200]
|
||||||
for (int32_t i = 0; i < 200; i++) {
|
for (int32_t i = 0; i < 200; i++) {
|
||||||
values[i] = RandomString(&rnd, value_size);
|
values[i] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1655,7 +1656,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) {
|
|||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
}
|
}
|
||||||
values[j] = RandomString(&rnd, value_size);
|
values[j] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(j), values[j]));
|
ASSERT_OK(Put(Key(j), values[j]));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1742,7 +1743,7 @@ TEST_F(DBCompactionTest, DeleteFilesInRanges) {
|
|||||||
for (auto i = 0; i < 10; i++) {
|
for (auto i = 0; i < 10; i++) {
|
||||||
for (auto j = 0; j < 100; j++) {
|
for (auto j = 0; j < 100; j++) {
|
||||||
auto k = i * 100 + j;
|
auto k = i * 100 + j;
|
||||||
values[k] = RandomString(&rnd, value_size);
|
values[k] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(k), values[k]));
|
ASSERT_OK(Put(Key(k), values[k]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1874,7 +1875,7 @@ TEST_F(DBCompactionTest, DeleteFileRangeFileEndpointsOverlapBug) {
|
|||||||
// would cause `1 -> vals[0]` (an older key) to reappear.
|
// would cause `1 -> vals[0]` (an older key) to reappear.
|
||||||
std::string vals[kNumL0Files];
|
std::string vals[kNumL0Files];
|
||||||
for (int i = 0; i < kNumL0Files; ++i) {
|
for (int i = 0; i < kNumL0Files; ++i) {
|
||||||
vals[i] = RandomString(&rnd, kValSize);
|
vals[i] = rnd.RandomString(kValSize);
|
||||||
Put(Key(i), vals[i]);
|
Put(Key(i), vals[i]);
|
||||||
Put(Key(i + 1), vals[i]);
|
Put(Key(i + 1), vals[i]);
|
||||||
Flush();
|
Flush();
|
||||||
@ -1916,7 +1917,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) {
|
|||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
// File with keys [ 0 => 99 ]
|
// File with keys [ 0 => 99 ]
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -1934,7 +1935,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) {
|
|||||||
|
|
||||||
// File with keys [ 100 => 199 ]
|
// File with keys [ 100 => 199 ]
|
||||||
for (int i = 100; i < 200; i++) {
|
for (int i = 100; i < 200; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(Key(i), values[i]));
|
ASSERT_OK(Put(Key(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -2329,7 +2330,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
|
|||||||
|
|
||||||
for (int i = 0; i <= max_key_level_insert; i++) {
|
for (int i = 0; i <= max_key_level_insert; i++) {
|
||||||
// each value is 10K
|
// each value is 10K
|
||||||
ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush(1));
|
ASSERT_OK(Flush(1));
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -2387,7 +2388,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
|
|||||||
ReopenWithColumnFamilies({"default", "pikachu"}, options);
|
ReopenWithColumnFamilies({"default", "pikachu"}, options);
|
||||||
|
|
||||||
for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
|
for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
|
||||||
ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
|
||||||
}
|
}
|
||||||
dbfull()->Flush(FlushOptions());
|
dbfull()->Flush(FlushOptions());
|
||||||
ASSERT_OK(Flush(1));
|
ASSERT_OK(Flush(1));
|
||||||
@ -2682,7 +2683,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) {
|
|||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
|
for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
|
||||||
ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
|
ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize)));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -2758,8 +2759,8 @@ TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) {
|
|||||||
std::vector<std::string> keys;
|
std::vector<std::string> keys;
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int k = 0; k < kNumInsertedKeys; ++k) {
|
for (int k = 0; k < kNumInsertedKeys; ++k) {
|
||||||
keys.emplace_back(RandomString(&rnd, kKeySize));
|
keys.emplace_back(rnd.RandomString(kKeySize));
|
||||||
values.emplace_back(RandomString(&rnd, kKvSize - kKeySize));
|
values.emplace_back(rnd.RandomString(kKvSize - kKeySize));
|
||||||
ASSERT_OK(Put(Slice(keys[k]), Slice(values[k])));
|
ASSERT_OK(Put(Slice(keys[k]), Slice(values[k])));
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
}
|
}
|
||||||
@ -2825,7 +2826,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) {
|
|||||||
for (int i = 0; i < 2; ++i) {
|
for (int i = 0; i < 2; ++i) {
|
||||||
// Create 1MB sst file
|
// Create 1MB sst file
|
||||||
for (int j = 0; j < 100; ++j) {
|
for (int j = 0; j < 100; ++j) {
|
||||||
ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
|
ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -2860,7 +2861,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) {
|
|||||||
for (int i = 0; i < 2; ++i) {
|
for (int i = 0; i < 2; ++i) {
|
||||||
// Create 1MB sst file
|
// Create 1MB sst file
|
||||||
for (int j = 0; j < 100; ++j) {
|
for (int j = 0; j < 100; ++j) {
|
||||||
ASSERT_OK(Put(Key(i * 50 + j + 100), RandomString(&rnd, 10 * 1024)));
|
ASSERT_OK(Put(Key(i * 50 + j + 100), rnd.RandomString(10 * 1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -3118,7 +3119,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
|
|||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
// File with keys [ 0 => 99 ]
|
// File with keys [ 0 => 99 ]
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(ShortKey(i), values[i]));
|
ASSERT_OK(Put(ShortKey(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -3135,7 +3136,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
|
|||||||
|
|
||||||
// File with keys [ 100 => 199 ]
|
// File with keys [ 100 => 199 ]
|
||||||
for (int i = 100; i < 200; i++) {
|
for (int i = 100; i < 200; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(ShortKey(i), values[i]));
|
ASSERT_OK(Put(ShortKey(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -3153,7 +3154,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
|
|||||||
|
|
||||||
// File with keys [ 200 => 299 ]
|
// File with keys [ 200 => 299 ]
|
||||||
for (int i = 200; i < 300; i++) {
|
for (int i = 200; i < 300; i++) {
|
||||||
values.push_back(RandomString(&rnd, value_size));
|
values.push_back(rnd.RandomString(value_size));
|
||||||
ASSERT_OK(Put(ShortKey(i), values[i]));
|
ASSERT_OK(Put(ShortKey(i), values[i]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -3197,7 +3198,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0Compaction) {
|
|||||||
|
|
||||||
const size_t kValueSize = 1 << 20;
|
const size_t kValueSize = 1 << 20;
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::string value(RandomString(&rnd, kValueSize));
|
std::string value(rnd.RandomString(kValueSize));
|
||||||
|
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
||||||
{{"LevelCompactionPicker::PickCompactionBySize:0",
|
{{"LevelCompactionPicker::PickCompactionBySize:0",
|
||||||
@ -3261,7 +3262,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) {
|
|||||||
|
|
||||||
const size_t kValueSize = 1 << 20;
|
const size_t kValueSize = 1 << 20;
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::string value(RandomString(&rnd, kValueSize));
|
std::string value(rnd.RandomString(kValueSize));
|
||||||
|
|
||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
||||||
{{"LevelCompactionPicker::PickCompactionBySize:0",
|
{{"LevelCompactionPicker::PickCompactionBySize:0",
|
||||||
@ -3480,7 +3481,7 @@ TEST_F(DBCompactionTest, CompactBottomLevelFilesWithDeletions) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
if (i == kNumLevelFiles - 1) {
|
if (i == kNumLevelFiles - 1) {
|
||||||
snapshot = db_->GetSnapshot();
|
snapshot = db_->GetSnapshot();
|
||||||
@ -3552,7 +3553,7 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -3598,7 +3599,7 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -3693,7 +3694,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
|
|||||||
// Add two L6 files with key ranges: [1 .. 100], [101 .. 200].
|
// Add two L6 files with key ranges: [1 .. 100], [101 .. 200].
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 1; i <= 100; ++i) {
|
for (int i = 1; i <= 100; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
// Get the first file's creation time. This will be the oldest file in the
|
// Get the first file's creation time. This will be the oldest file in the
|
||||||
@ -3706,7 +3707,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
|
|||||||
// Add 1 hour and do another flush.
|
// Add 1 hour and do another flush.
|
||||||
env_->addon_time_.fetch_add(1 * 60 * 60);
|
env_->addon_time_.fetch_add(1 * 60 * 60);
|
||||||
for (int i = 101; i <= 200; ++i) {
|
for (int i = 101; i <= 200; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
MoveFilesToLevel(6);
|
MoveFilesToLevel(6);
|
||||||
@ -3715,12 +3716,12 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
|
|||||||
env_->addon_time_.fetch_add(1 * 60 * 60);
|
env_->addon_time_.fetch_add(1 * 60 * 60);
|
||||||
// Add two L4 files with key ranges: [1 .. 50], [51 .. 150].
|
// Add two L4 files with key ranges: [1 .. 50], [51 .. 150].
|
||||||
for (int i = 1; i <= 50; ++i) {
|
for (int i = 1; i <= 50; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
env_->addon_time_.fetch_add(1 * 60 * 60);
|
env_->addon_time_.fetch_add(1 * 60 * 60);
|
||||||
for (int i = 51; i <= 150; ++i) {
|
for (int i = 51; i <= 150; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
MoveFilesToLevel(4);
|
MoveFilesToLevel(4);
|
||||||
@ -3729,7 +3730,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
|
|||||||
env_->addon_time_.fetch_add(1 * 60 * 60);
|
env_->addon_time_.fetch_add(1 * 60 * 60);
|
||||||
// Add one L1 file with key range: [26, 75].
|
// Add one L1 file with key range: [26, 75].
|
||||||
for (int i = 26; i <= 75; ++i) {
|
for (int i = 26; i <= 75; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -3840,8 +3841,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(Put(Key(i * kNumKeysPerFile + j),
|
ASSERT_OK(
|
||||||
RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -3935,7 +3936,7 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithOldDB) {
|
|||||||
for (int i = 0; i < kNumFiles; ++i) {
|
for (int i = 0; i < kNumFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
// Move the first two files to L2.
|
// Move the first two files to L2.
|
||||||
@ -3998,7 +3999,7 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -4109,7 +4110,7 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -4169,7 +4170,7 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByL0FileCount) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int j = 0; j < kNumL0FilesLimit - 1; ++j) {
|
for (int j = 0; j < kNumL0FilesLimit - 1; ++j) {
|
||||||
for (int k = 0; k < 2; ++k) {
|
for (int k = 0; k < 2; ++k) {
|
||||||
ASSERT_OK(Put(Key(k), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(k), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -4223,7 +4224,7 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByImmMemTableCount) {
|
|||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int j = 0; j < kNumImmMemTableLimit - 1; ++j) {
|
for (int j = 0; j < kNumImmMemTableLimit - 1; ++j) {
|
||||||
ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(0), rnd.RandomString(1024)));
|
||||||
FlushOptions flush_opts;
|
FlushOptions flush_opts;
|
||||||
flush_opts.wait = false;
|
flush_opts.wait = false;
|
||||||
flush_opts.allow_write_stall = true;
|
flush_opts.allow_write_stall = true;
|
||||||
@ -4271,7 +4272,7 @@ TEST_F(DBCompactionTest, CompactRangeShutdownWhileDelayed) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int j = 0; j < kNumL0FilesLimit - 1; ++j) {
|
for (int j = 0; j < kNumL0FilesLimit - 1; ++j) {
|
||||||
for (int k = 0; k < 2; ++k) {
|
for (int k = 0; k < 2; ++k) {
|
||||||
ASSERT_OK(Put(1, Key(k), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(1, Key(k), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
Flush(1);
|
Flush(1);
|
||||||
}
|
}
|
||||||
@ -4331,7 +4332,7 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
|
|||||||
flush_opts.allow_write_stall = true;
|
flush_opts.allow_write_stall = true;
|
||||||
for (int i = 0; i < kNumL0FilesLimit - 1; ++i) {
|
for (int i = 0; i < kNumL0FilesLimit - 1; ++i) {
|
||||||
for (int j = 0; j < 2; ++j) {
|
for (int j = 0; j < 2; ++j) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
dbfull()->Flush(flush_opts);
|
dbfull()->Flush(flush_opts);
|
||||||
}
|
}
|
||||||
@ -4342,9 +4343,9 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
|
|||||||
});
|
});
|
||||||
|
|
||||||
TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush");
|
TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush");
|
||||||
Put(ToString(0), RandomString(&rnd, 1024));
|
Put(ToString(0), rnd.RandomString(1024));
|
||||||
dbfull()->Flush(flush_opts);
|
dbfull()->Flush(flush_opts);
|
||||||
Put(ToString(0), RandomString(&rnd, 1024));
|
Put(ToString(0), rnd.RandomString(1024));
|
||||||
TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush");
|
TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush");
|
||||||
manual_compaction_thread.join();
|
manual_compaction_thread.join();
|
||||||
|
|
||||||
@@ -4784,7 +4785,7 @@ TEST_P(CompactionPriTest, Test) {
   RandomShuffle(std::begin(keys), std::end(keys), rnd.Next());
 
   for (int i = 0; i < kNKeys; i++) {
-    ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(keys[i]), rnd.RandomString(102)));
   }
 
   dbfull()->TEST_WaitForCompact();
@@ -4826,7 +4827,7 @@ TEST_F(DBCompactionTest, PartialManualCompaction) {
   Random rnd(301);
   for (auto i = 0; i < 8; ++i) {
     for (auto j = 0; j < 10; ++j) {
-      Merge("foo", RandomString(&rnd, 1024));
+      Merge("foo", rnd.RandomString(1024));
     }
     Flush();
   }
@@ -4858,8 +4859,8 @@ TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) {
   Random rnd(301);
   for (int i = 0; i < kNumL0Files; ++i) {
     // Make sure files are overlapping in key-range to prevent trivial move.
-    Put("key1", RandomString(&rnd, 1024));
-    Put("key2", RandomString(&rnd, 1024));
+    Put("key1", rnd.RandomString(1024));
+    Put("key2", rnd.RandomString(1024));
     Flush();
   }
   ASSERT_EQ(kNumL0Files, NumTableFilesAtLevel(0));
@@ -4868,7 +4869,7 @@ TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) {
   mock_env->SetFilesystemActive(false);
   // Make sure this is outside `CompactRange`'s range so that it doesn't fail
   // early trying to flush memtable.
-  ASSERT_NOK(Put("key3", RandomString(&rnd, 1024)));
+  ASSERT_NOK(Put("key3", rnd.RandomString(1024)));
 
   // In the bug scenario, the first manual compaction would fail and forget to
   // unregister itself, causing the second one to hang forever due to conflict
@@ -4907,7 +4908,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) {
   for (auto i = 0; i < 8; ++i) {
     for (auto j = 0; j < 10; ++j) {
       ASSERT_OK(
-          Put("foo" + std::to_string(i * 10 + j), RandomString(&rnd, 1024)));
+          Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024)));
     }
     Flush();
   }
@@ -4917,7 +4918,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) {
   for (auto i = 0; i < 8; ++i) {
     for (auto j = 0; j < 10; ++j) {
       ASSERT_OK(
-          Put("bar" + std::to_string(i * 10 + j), RandomString(&rnd, 1024)));
+          Put("bar" + std::to_string(i * 10 + j), rnd.RandomString(1024)));
     }
     Flush();
   }
@@ -4951,7 +4952,7 @@ TEST_F(DBCompactionTest, CompactionDuringShutdown) {
   for (auto i = 0; i < 2; ++i) {
     for (auto j = 0; j < 10; ++j) {
       ASSERT_OK(
-          Put("foo" + std::to_string(i * 10 + j), RandomString(&rnd, 1024)));
+          Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024)));
     }
     Flush();
   }
@@ -4974,7 +4975,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) {
 
   // Generate an external SST file containing a single key, i.e. 99
   std::string sst_files_dir = dbname_ + "/sst_files/";
-  test::DestroyDir(env_, sst_files_dir);
+  DestroyDir(env_, sst_files_dir);
   ASSERT_OK(env_->CreateDir(sst_files_dir));
   SstFileWriter sst_writer(EnvOptions(), options);
   const std::string sst_file_path = sst_files_dir + "test.sst";
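
Note: test::DestroyDir becomes a plain DestroyDir call that no longer needs
test_util. A small sketch of the new usage; the header path "file/file_util.h"
is an assumption, only the DestroyDir(env, dir) call shape comes from the hunk
above:

    #include <cassert>
    #include <string>
    #include "file/file_util.h"  // assumed new home of DestroyDir
    #include "rocksdb/env.h"

    using namespace ROCKSDB_NAMESPACE;

    // Recreate an empty scratch directory, mirroring the SST-file test above.
    void ResetScratchDir(Env* env, const std::string& dir) {
      DestroyDir(env, dir);            // best effort, ignore the returned Status
      Status s = env->CreateDir(dir);  // start again from an empty directory
      assert(s.ok());
      (void)s;
    }
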
@@ -5001,7 +5002,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) {
   // Generate level0_stop_writes_trigger L0 files to trigger write stop
   for (int i = 0; i != options.level0_file_num_compaction_trigger; ++i) {
     for (int j = 0; j != kNumKeysPerFile; ++j) {
-      ASSERT_OK(Put(Key(j), RandomString(&rnd, 990)));
+      ASSERT_OK(Put(Key(j), rnd.RandomString(990)));
     }
     if (0 == i) {
       // When we reach here, the memtables have kNumKeysPerFile keys. Note that
@@ -5093,7 +5094,7 @@ TEST_F(DBCompactionTest, ConsistencyFailTest2) {
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
 
   Random rnd(301);
-  std::string value = RandomString(&rnd, 1000);
+  std::string value = rnd.RandomString(1000);
 
   ASSERT_OK(Put("foo1", value));
   ASSERT_OK(Put("z", ""));
@@ -5140,7 +5141,7 @@ TEST_P(DBCompactionTestWithParam,
   const size_t kValueSize = 1 << 20;
   Random rnd(301);
   std::atomic<int> pick_intra_l0_count(0);
-  std::string value(RandomString(&rnd, kValueSize));
+  std::string value(rnd.RandomString(kValueSize));
 
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
       {{"DBCompactionTestWithParam::FlushAfterIntraL0:1",
@@ -5207,8 +5208,8 @@ TEST_P(DBCompactionTestWithParam,
 
   const size_t kValueSize = 1 << 20;
   Random rnd(301);
-  std::string value(RandomString(&rnd, kValueSize));
-  std::string value2(RandomString(&rnd, kValueSize));
+  std::string value(rnd.RandomString(kValueSize));
+  std::string value2(rnd.RandomString(kValueSize));
   std::string bigvalue = value + value;
 
   // prevents trivial move

@@ -15,6 +15,7 @@
 #include "db/db_test_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 class DBTestDynamicLevel : public DBTestBase {
@@ -80,9 +81,9 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
 
   for (int i = 0; i < kNKeys; i++) {
     int key = keys[i];
-    ASSERT_OK(Put(Key(kNKeys + key), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(key), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kNKeys * 2 + key), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(kNKeys + key), rnd.RandomString(102)));
+    ASSERT_OK(Put(Key(key), rnd.RandomString(102)));
+    ASSERT_OK(Put(Key(kNKeys * 2 + key), rnd.RandomString(102)));
     ASSERT_OK(Delete(Key(kNKeys + keys[i / 10])));
     env_->SleepForMicroseconds(5000);
   }
@@ -158,7 +159,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   // Put about 28K to L0
   for (int i = 0; i < 70; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
+                  rnd.RandomString(380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
@@ -175,7 +176,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   }));
   for (int i = 0; i < 70; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
+                  rnd.RandomString(380)));
   }
 
   ASSERT_OK(dbfull()->SetOptions({
@@ -197,7 +198,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   // Write about 40K more
   for (int i = 0; i < 100; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
+                  rnd.RandomString(380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
@@ -216,7 +217,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   // Each file is about 11KB, with 9KB of data.
   for (int i = 0; i < 1300; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
+                  rnd.RandomString(380)));
   }
 
   // Make sure that the compaction starts before the last bit of data is
@@ -257,7 +258,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1");
   for (int i = 0; i < 2; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
+                  rnd.RandomString(380)));
   }
   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:2");
 
@@ -310,15 +311,15 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
 
   // Put about 7K to L0
   for (int i = 0; i < 140; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+    ASSERT_OK(
+        Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
   }
   Flush();
   dbfull()->TEST_WaitForCompact();
   if (NumTableFilesAtLevel(0) == 0) {
     // Make sure level 0 is not empty
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+    ASSERT_OK(
+        Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
     Flush();
   }
 
@@ -382,7 +383,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
   const int total_keys = 3000;
   const int random_part_size = 100;
   for (int i = 0; i < total_keys; i++) {
-    std::string value = RandomString(&rnd, random_part_size);
+    std::string value = rnd.RandomString(random_part_size);
     PutFixed32(&value, static_cast<uint32_t>(i));
     ASSERT_OK(Put(Key(i), value));
   }
@@ -441,8 +442,8 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
 
   int total_keys = 1000;
   for (int i = 0; i < total_keys; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(102)));
+    ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102)));
     ASSERT_OK(Delete(Key(i / 10)));
   }
   verify_func(total_keys, false);
@@ -475,8 +476,8 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
 
   int total_keys2 = 2000;
   for (int i = total_keys; i < total_keys2; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(102)));
+    ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102)));
     ASSERT_OK(Delete(Key(i / 10)));
   }
 

@@ -13,10 +13,10 @@
 #include "db/db_test_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
 #include "util/cast_util.h"
 #include "util/mutexlock.h"
+#include "utilities/fault_injection_env.h"
 
 namespace ROCKSDB_NAMESPACE {
 
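
Note: several tests swap #include "test_util/fault_injection_test_env.h" for
#include "utilities/fault_injection_env.h"; only the include path changes. A
short sketch of typical usage, with the class name FaultInjectionTestEnv taken
from the old header name and the SetFilesystemActive() call taken from the
ManualCompactionFailsInReadOnlyMode hunk earlier in this diff; treat both as
assumptions rather than part of this patch:

    #include <memory>
    #include "rocksdb/env.h"
    #include "utilities/fault_injection_env.h"  // new location

    using namespace ROCKSDB_NAMESPACE;

    int main() {
      // Wrap the real Env so a test can simulate I/O failures on demand.
      std::unique_ptr<FaultInjectionTestEnv> fault_env(
          new FaultInjectionTestEnv(Env::Default()));
      fault_env->SetFilesystemActive(false);  // subsequent writes fail
      fault_env->SetFilesystemActive(true);   // restore before teardown
      return 0;
    }
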

@@ -10,8 +10,8 @@
 #include "db/db_impl/db_impl_secondary.h"
 #include "db/db_test_util.h"
 #include "port/stack_trace.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
+#include "utilities/fault_injection_env.h"
 
 namespace ROCKSDB_NAMESPACE {
 

@@ -9,6 +9,7 @@
 
 #include "db/db_test_util.h"
 #include "port/stack_trace.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
@@ -281,8 +282,8 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
 
   Random rnd(301);
   std::string rnd_str =
-      RandomString(&rnd, static_cast<int>(options.bytes_per_sync / 2));
-  std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024);
+      rnd.RandomString(static_cast<int>(options.bytes_per_sync / 2));
+  std::string rnd_str_512kb = rnd.RandomString(512 * 1024);
 
   ASSERT_OK(Put(1, "foo", "bar"));
   // First 1MB doesn't get range synced
@@ -330,8 +331,8 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
 
   Random rnd(301);
   std::string rnd_str =
-      RandomString(&rnd, static_cast<int>(options.bytes_per_sync / 2));
-  std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024);
+      rnd.RandomString(static_cast<int>(options.bytes_per_sync / 2));
+  std::string rnd_str_512kb = rnd.RandomString(512 * 1024);
 
   ASSERT_OK(Put(1, "foo", "bar"));
   // First 1MB doesn't get range synced

@@ -17,6 +17,7 @@
 #include "rocksdb/iostats_context.h"
 #include "rocksdb/perf_context.h"
 #include "table/block_based/flush_block_policy.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
@@ -194,10 +195,10 @@ TEST_P(DBIteratorTest, IterReseekNewUpperBound) {
   options.compression = kNoCompression;
   Reopen(options);
 
-  ASSERT_OK(Put("a", RandomString(&rnd, 400)));
-  ASSERT_OK(Put("aabb", RandomString(&rnd, 400)));
-  ASSERT_OK(Put("aaef", RandomString(&rnd, 400)));
-  ASSERT_OK(Put("b", RandomString(&rnd, 400)));
+  ASSERT_OK(Put("a", rnd.RandomString(400)));
+  ASSERT_OK(Put("aabb", rnd.RandomString(400)));
+  ASSERT_OK(Put("aaef", rnd.RandomString(400)));
+  ASSERT_OK(Put("b", rnd.RandomString(400)));
   dbfull()->Flush(FlushOptions());
   ReadOptions opts;
   Slice ub = Slice("aa");
@@ -1360,7 +1361,7 @@ class DBIteratorTestForPinnedData : public DBIteratorTest {
 
   std::vector<std::string> generated_keys(key_pool);
   for (int i = 0; i < key_pool; i++) {
-    generated_keys[i] = RandomString(&rnd, key_size);
+    generated_keys[i] = rnd.RandomString(key_size);
   }
 
   std::map<std::string, std::string> true_data;
@@ -1368,7 +1369,7 @@ class DBIteratorTestForPinnedData : public DBIteratorTest {
   std::vector<std::string> deleted_keys;
   for (int i = 0; i < puts; i++) {
     auto& k = generated_keys[rnd.Next() % key_pool];
-    auto v = RandomString(&rnd, val_size);
+    auto v = rnd.RandomString(val_size);
 
     // Insert data to true_data map and to DB
     true_data[k] = v;
@@ -1531,7 +1532,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
   Random rnd(301);
   for (int i = 1; i <= 1000; i++) {
     std::string k = Key(i * 3);
-    std::string v = RandomString(&rnd, 100);
+    std::string v = rnd.RandomString(100);
     ASSERT_OK(Put(k, v));
     true_data[k] = v;
     if (i % 250 == 0) {
@@ -1545,7 +1546,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
   // Generate 4 sst files in L0
   for (int i = 1; i <= 1000; i++) {
     std::string k = Key(i * 2);
-    std::string v = RandomString(&rnd, 100);
+    std::string v = rnd.RandomString(100);
     ASSERT_OK(Put(k, v));
     true_data[k] = v;
     if (i % 250 == 0) {
@@ -1557,7 +1558,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
   // Add some keys/values in memtables
   for (int i = 1; i <= 1000; i++) {
     std::string k = Key(i);
-    std::string v = RandomString(&rnd, 100);
+    std::string v = rnd.RandomString(100);
     ASSERT_OK(Put(k, v));
     true_data[k] = v;
   }
@@ -1659,8 +1660,8 @@ TEST_P(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) {
 
   std::map<std::string, std::string> true_data;
   for (int i = 0; i < 1000; i++) {
-    std::string k = RandomString(&rnd, 10);
-    std::string v = RandomString(&rnd, 1000);
+    std::string k = rnd.RandomString(10);
+    std::string v = rnd.RandomString(1000);
     ASSERT_OK(Put(k, v));
     true_data[k] = v;
   }
@@ -1674,7 +1675,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) {
     if (rnd.OneIn(2)) {
       ASSERT_OK(Delete(kv.first));
     } else {
-      std::string new_val = RandomString(&rnd, 1000);
+      std::string new_val = rnd.RandomString(1000);
       ASSERT_OK(Put(kv.first, new_val));
     }
   }
@@ -1931,7 +1932,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
 
   for (int i = 0; i < kNumKeys; i++) {
     gen_key = Key(i);
-    gen_val = RandomString(&rnd, kValSize);
+    gen_val = rnd.RandomString(kValSize);
 
     ASSERT_OK(Put(gen_key, gen_val));
     true_data[gen_key] = gen_val;
@@ -1949,7 +1950,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
 
     for (int j = 0; j < kNumMergeOperands; j++) {
       gen_key = Key(i);
-      gen_val = RandomString(&rnd, kValSize);
+      gen_val = rnd.RandomString(kValSize);
 
       ASSERT_OK(db_->Merge(WriteOptions(), gen_key, gen_val));
       true_data[gen_key] += "," + gen_val;
@@ -2049,7 +2050,7 @@ TEST_P(DBIteratorTest, IteratorWithLocalStatistics) {
   Random rnd(301);
   for (int i = 0; i < 1000; i++) {
     // Key 10 bytes / Value 10 bytes
-    ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
+    ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
   }
 
   std::atomic<uint64_t> total_next(0);
@@ -2705,7 +2706,7 @@ TEST_P(DBIteratorTest, AvoidReseekLevelIterator) {
   Reopen(options);
 
   Random rnd(301);
-  std::string random_str = RandomString(&rnd, 180);
+  std::string random_str = rnd.RandomString(180);
 
   ASSERT_OK(Put("1", random_str));
   ASSERT_OK(Put("2", random_str));

@@ -8,11 +8,11 @@
 #include "rocksdb/perf_context.h"
 #include "rocksdb/utilities/debug.h"
 #include "table/block_based/block_builder.h"
-#include "test_util/fault_injection_test_env.h"
 #if !defined(ROCKSDB_LITE)
 #include "test_util/sync_point.h"
 #endif
 #include "rocksdb/merge_operator.h"
+#include "utilities/fault_injection_env.h"
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/sortlist.h"
 #include "utilities/merge_operators/string_append/stringappend2.h"

@@ -9,6 +9,7 @@
 #include "db/forward_iterator.h"
 #include "port/stack_trace.h"
 #include "rocksdb/merge_operator.h"
+#include "util/random.h"
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/string_append/stringappend2.h"
 
@@ -242,7 +243,7 @@ TEST_P(MergeOperatorPinningTest, OperandsMultiBlocks) {
       std::string key = Key(key_id % 35);
       key_id++;
       for (int k = 0; k < kOperandsPerKeyPerFile; k++) {
-        std::string val = RandomString(&rnd, kOperandSize);
+        std::string val = rnd.RandomString(kOperandSize);
         ASSERT_OK(db_->Merge(WriteOptions(), key, val));
         if (true_data[key].size() == 0) {
           true_data[key] = val;
@@ -327,7 +328,7 @@ TEST_P(MergeOperatorPinningTest, EvictCacheBeforeMerge) {
   for (int i = 0; i < kNumOperands; i++) {
     for (int j = 0; j < kNumKeys; j++) {
       std::string k = Key(j);
-      std::string v = RandomString(&rnd, kOperandSize);
+      std::string v = rnd.RandomString(kOperandSize);
       ASSERT_OK(db_->Merge(WriteOptions(), k, v));
 
       true_data[k] = std::max(true_data[k], v);
@@ -620,7 +621,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) {
   // kNumPutBefore keys will have base values
   for (int i = 0; i < kNumPutBefore; i++) {
     std::string key = Key(rnd.Next() % kKeyRange);
-    std::string value = RandomString(&rnd, kOperandSize);
+    std::string value = rnd.RandomString(kOperandSize);
     ASSERT_OK(db_->Put(WriteOptions(), key, value));
 
     true_data[key] = value;
@@ -629,7 +630,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) {
   // Do kTotalMerges merges
   for (int i = 0; i < kTotalMerges; i++) {
     std::string key = Key(rnd.Next() % kKeyRange);
-    std::string value = RandomString(&rnd, kOperandSize);
+    std::string value = rnd.RandomString(kOperandSize);
     ASSERT_OK(db_->Merge(WriteOptions(), key, value));
 
     if (true_data[key] < value) {
@@ -640,7 +641,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) {
   // Overwrite random kNumPutAfter keys
   for (int i = 0; i < kNumPutAfter; i++) {
     std::string key = Key(rnd.Next() % kKeyRange);
-    std::string value = RandomString(&rnd, kOperandSize);
+    std::string value = rnd.RandomString(kOperandSize);
     ASSERT_OK(db_->Put(WriteOptions(), key, value));
 
     true_data[key] = value;

@@ -493,8 +493,7 @@ TEST_F(DBOptionsTest, SetDelayedWriteRateOption) {
 TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
   Random rnd(1044);
   const auto value_size = size_t(1024);
-  std::string value;
-  test::RandomString(&rnd, value_size, &value);
+  std::string value = rnd.RandomString(value_size);
 
   Options options;
   options.create_if_missing = true;
@@ -715,7 +714,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
   for (int i = 0; i < 10; i++) {
     // Generate and flush a file about 10KB.
     for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
     Flush();
   }
@@ -746,7 +745,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
   for (int i = 0; i < 10; i++) {
     // Generate and flush a file about 10KB.
     for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
     Flush();
   }
@@ -778,7 +777,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
   for (int i = 0; i < 10; i++) {
     // Generate and flush a file about 10KB.
     for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
     Flush();
   }
@@ -842,7 +841,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
   for (int i = 0; i < 10; i++) {
     // Generate and flush a file about 10KB.
     for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
     Flush();
   }

@@ -126,8 +126,8 @@ TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
   Random rnd(301);
   for (auto* handle : handles_) {
     for (int i = 0; i < kKeyNum; ++i) {
-      db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize),
-               RandomString(&rnd, kValueSize));
+      db_->Put(WriteOptions(), handle, rnd.RandomString(kKeySize),
+               rnd.RandomString(kValueSize));
     }
   }
 
@@ -346,18 +346,18 @@ TEST_F(DBPropertiesTest, AggregatedTableProperties) {
   Random rnd(5632);
   for (int table = 1; table <= kTableCount; ++table) {
     for (int i = 0; i < kPutsPerTable; ++i) {
-      db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
-               RandomString(&rnd, kValueSize));
+      db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
+               rnd.RandomString(kValueSize));
     }
     for (int i = 0; i < kDeletionsPerTable; i++) {
-      db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize));
+      db_->Delete(WriteOptions(), rnd.RandomString(kKeySize));
     }
     for (int i = 0; i < kMergeOperandsPerTable; i++) {
-      db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize),
-                 RandomString(&rnd, kValueSize));
+      db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
+                 rnd.RandomString(kValueSize));
     }
     for (int i = 0; i < kRangeDeletionsPerTable; i++) {
-      std::string start = RandomString(&rnd, kKeySize);
+      std::string start = rnd.RandomString(kKeySize);
       std::string end = start;
       end.resize(kValueSize);
       db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end);
@@ -546,18 +546,18 @@ TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
   TableProperties tp, sum_tp, expected_tp;
   for (int table = 1; table <= kTableCount; ++table) {
     for (int i = 0; i < kPutsPerTable; ++i) {
-      db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
-               RandomString(&rnd, kValueSize));
+      db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
+               rnd.RandomString(kValueSize));
     }
     for (int i = 0; i < kDeletionsPerTable; i++) {
-      db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize));
+      db_->Delete(WriteOptions(), rnd.RandomString(kKeySize));
     }
     for (int i = 0; i < kMergeOperandsPerTable; i++) {
-      db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize),
-                 RandomString(&rnd, kValueSize));
+      db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
+                 rnd.RandomString(kValueSize));
     }
     for (int i = 0; i < kRangeDeletionsPerTable; i++) {
-      std::string start = RandomString(&rnd, kKeySize);
+      std::string start = rnd.RandomString(kKeySize);
       std::string end = start;
       end.resize(kValueSize);
       db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end);
@@ -920,7 +920,7 @@ TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
   for (int r = 0; r < kNumRounds; ++r) {
     for (int f = 0; f < kFlushesPerRound; ++f) {
      for (int w = 0; w < kWritesPerFlush; ++w) {
-        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
+        Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize));
      }
    }
     // Make sure that there is no flush between getting the two properties.
@@ -938,7 +938,7 @@ TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
     iters.push_back(db_->NewIterator(ReadOptions()));
     for (int f = 0; f < kFlushesPerRound; ++f) {
       for (int w = 0; w < kWritesPerFlush; ++w) {
-        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
+        Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize));
      }
    }
     // Force flush to prevent flush from happening between getting the
@@ -1296,8 +1296,8 @@ TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
 
   const int kMaxKey = 1000;
   for (int i = 0; i < kMaxKey; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(102)));
+    ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102)));
   }
   Flush();
   dbfull()->TEST_WaitForCompact();

@@ -7,6 +7,7 @@
 #include "port/stack_trace.h"
 #include "rocksdb/utilities/write_batch_with_index.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
 #include "utilities/merge_operators.h"
 
 namespace ROCKSDB_NAMESPACE {
@@ -124,7 +125,7 @@ TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) {
     std::vector<std::string> values;
     // Write 12K (4 values, each 3K)
    for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, 3 << 10));
+      values.push_back(rnd.RandomString(3 << 10));
       ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
       if (j == 0 && i > 0) {
         dbfull()->TEST_WaitForFlushMemTable();
@@ -172,7 +173,7 @@ TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) {
     std::vector<std::string> values;
     // Write 1MB (256 values, each 4K)
     for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, kBytesPerVal));
+      values.push_back(rnd.RandomString(kBytesPerVal));
       ASSERT_OK(Put(GetNumericStr(kNumPerFile * i + j), values[j]));
     }
     // extra entry to trigger SpecialSkipListFactory's flush
@@ -378,7 +379,7 @@ TEST_F(DBRangeDelTest, ValidLevelSubcompactionBoundaries) {
       std::vector<std::string> values;
       // Write 100KB (100 values, each 1K)
       for (int k = 0; k < kNumPerFile; k++) {
-        values.push_back(RandomString(&rnd, 990));
+        values.push_back(rnd.RandomString(990));
         ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k]));
       }
       // put extra key to trigger flush
@@ -438,7 +439,7 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
      std::vector<std::string> values;
      // Write 100KB (100 values, each 1K)
      for (int k = 0; k < kNumPerFile; k++) {
-        values.push_back(RandomString(&rnd, 990));
+        values.push_back(rnd.RandomString(990));
        ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k]));
      }
      // put extra key to trigger flush
@@ -990,7 +991,7 @@ TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) {
                              Key(2 * kNumFilesPerLevel));
 
   Random rnd(301);
-  std::string value = RandomString(&rnd, kValueBytes);
+  std::string value = rnd.RandomString(kValueBytes);
   for (int j = 0; j < kNumFilesPerLevel; ++j) {
     // give files overlapping key-ranges to prevent trivial move
     ASSERT_OK(Put(Key(j), value));
@@ -1063,7 +1064,7 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) {
   // [key000000#3,1, key000004#72057594037927935,15]
   // [key000001#5,1, key000002#6,1]
   Random rnd(301);
-  std::string value = RandomString(&rnd, kValueBytes);
+  std::string value = rnd.RandomString(kValueBytes);
   for (int j = 0; j < kNumFilesPerLevel; ++j) {
     // Give files overlapping key-ranges to prevent a trivial move when we
     // compact from L0 to L1.
@@ -1198,7 +1199,7 @@ TEST_F(DBRangeDelTest, KeyAtOverlappingEndpointReappears) {
   const Snapshot* snapshot = nullptr;
   for (int i = 0; i < kNumFiles; ++i) {
     for (int j = 0; j < kFileBytes / kValueBytes; ++j) {
-      auto value = RandomString(&rnd, kValueBytes);
+      auto value = rnd.RandomString(kValueBytes);
       ASSERT_OK(db_->Merge(WriteOptions(), "key", value));
     }
     if (i == kNumFiles - 1) {
@@ -1282,7 +1283,7 @@ TEST_F(DBRangeDelTest, UntruncatedTombstoneDoesNotDeleteNewerKey) {
   const Snapshot* snapshots[] = {nullptr, nullptr};
   for (int i = 0; i < kNumFiles; ++i) {
     for (int j = 0; j < kFileBytes / kValueBytes; ++j) {
-      auto value = RandomString(&rnd, kValueBytes);
+      auto value = rnd.RandomString(kValueBytes);
       std::string key;
       if (i < kNumFiles / 2) {
         key = Key(0);
@@ -1328,7 +1329,7 @@ TEST_F(DBRangeDelTest, UntruncatedTombstoneDoesNotDeleteNewerKey) {
   // Now overwrite a few keys that are in L1 files that definitely don't have
   // overlapping boundary keys.
   for (int i = kMaxKey; i > kMaxKey - kKeysOverwritten; --i) {
-    auto value = RandomString(&rnd, kValueBytes);
+    auto value = rnd.RandomString(kValueBytes);
     ASSERT_OK(db_->Merge(WriteOptions(), Key(i), value));
   }
   ASSERT_OK(db_->Flush(FlushOptions()));
@@ -1375,7 +1376,7 @@ TEST_F(DBRangeDelTest, DeletedMergeOperandReappearsIterPrev) {
   const Snapshot* snapshot = nullptr;
   for (int i = 0; i < kNumFiles; ++i) {
     for (int j = 0; j < kFileBytes / kValueBytes; ++j) {
-      auto value = RandomString(&rnd, kValueBytes);
+      auto value = rnd.RandomString(kValueBytes);
       ASSERT_OK(db_->Merge(WriteOptions(), Key(j % kNumKeys), value));
       if (i == 0 && j == kNumKeys) {
         // Take snapshot to prevent covered merge operands from being dropped or
@@ -1515,7 +1516,7 @@ TEST_F(DBRangeDelTest, RangeTombstoneWrittenToMinimalSsts) {
     for (int i = 0; i < kFileBytes / kValueBytes; ++i) {
       std::string key(1, first_char);
       key.append(Key(i));
-      std::string value = RandomString(&rnd, kValueBytes);
+      std::string value = rnd.RandomString(kValueBytes);
       ASSERT_OK(Put(key, value));
     }
     db_->Flush(FlushOptions());
@@ -1597,7 +1598,7 @@ TEST_F(DBRangeDelTest, OverlappedTombstones) {
     std::vector<std::string> values;
     // Write 12K (4 values, each 3K)
     for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, 3 << 10));
+      values.push_back(rnd.RandomString(3 << 10));
       ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
     }
   }
@@ -1636,7 +1637,7 @@ TEST_F(DBRangeDelTest, OverlappedKeys) {
    std::vector<std::string> values;
    // Write 12K (4 values, each 3K)
    for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, 3 << 10));
+      values.push_back(rnd.RandomString(3 << 10));
      ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
    }
  }

@@ -12,6 +12,7 @@
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/sst_file_manager.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
@@ -163,7 +164,7 @@ TEST_F(DBSSTTest, DontDeleteMovedFile) {
   for (int i = 0; i < 2; ++i) {
     // Create 1MB sst file
     for (int j = 0; j < 100; ++j) {
-      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+      ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024)));
     }
     ASSERT_OK(Flush());
   }
@@ -211,7 +212,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   for (int i = 0; i < 2; ++i) {
     // Create 1MB sst file
     for (int j = 0; j < 100; ++j) {
-      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+      ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024)));
     }
     ASSERT_OK(Flush());
   }
@@ -242,7 +243,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   // write_buffer_size. The flush will be blocked with block_first_time
   // pending_file is protecting all the files created after
   for (int j = 0; j < 256; ++j) {
-    ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024)));
+    ASSERT_OK(Put(Key(j), rnd.RandomString(10 * 1024)));
   }
   blocking_thread.WaitUntilSleeping();
 
@@ -758,7 +759,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowed) {
 
   // Generate a file containing 100 keys.
   for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
 
@@ -799,7 +800,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) {
 
   // Generate a file containing 10 keys.
   for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
   uint64_t total_file_size = 0;
@@ -809,7 +810,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) {
 
   // Generate another file to trigger compaction.
   for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
   dbfull()->TEST_WaitForCompact(true);
@@ -846,7 +847,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
 
   // Generate a file containing 10 keys.
   for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
   uint64_t total_file_size = 0;
@@ -856,7 +857,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
 
   // Generate another file to trigger compaction.
   for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
 
@@ -953,7 +954,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) {
     // It is easy to detect if the test is stuck in a loop. No need for
     // complex termination logic.
     while (true) {
-      auto s = Put(RandomString(&rnd, 10), RandomString(&rnd, 50));
+      auto s = Put(rnd.RandomString(10), rnd.RandomString(50));
      if (!s.ok()) {
        break;
      }

@@ -9,6 +9,7 @@
 #include "monitoring/thread_status_util.h"
 #include "port/stack_trace.h"
 #include "rocksdb/statistics.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
@@ -55,7 +56,7 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) {
   Random rnd(301);
   for (int i = 0; i < kNumKeysWritten; ++i) {
     // compressible string
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
   }
   ASSERT_OK(Flush());
   ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED), 0);
@@ -75,7 +76,7 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) {
   // Check that compressions do not occur when turned off
   for (int i = 0; i < kNumKeysWritten; ++i) {
     // compressible string
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
   }
   ASSERT_OK(Flush());
   ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED)

@@ -16,6 +16,7 @@
 #include "rocksdb/utilities/table_properties_collectors.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
 
 #ifndef ROCKSDB_LITE
 
@@ -155,12 +156,12 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
 
   // build a decent LSM
   for (int i = 0; i < 10000; i++) {
-    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
   }
   Flush();
   dbfull()->TEST_WaitForCompact();
   if (NumTableFilesAtLevel(0) == 0) {
-    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
     Flush();
   }
 
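
Note: test::RandomKey keeps its test_util home in the hunk above, while the
value side switches to the Random member. A tiny sketch of the combined usage
(the helper function and its name are illustrative only, not from the patch):

    #include <string>
    #include <utility>
    #include "test_util/testutil.h"  // still provides test::RandomKey
    #include "util/random.h"         // provides Random::RandomString

    using namespace ROCKSDB_NAMESPACE;

    // Build one key/value pair the way GetPropertiesOfTablesInRange does.
    std::pair<std::string, std::string> MakeKV(Random* rnd) {
      return {test::RandomKey(rnd, 5), rnd->RandomString(102)};
    }
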

db/db_test.cc
@@ -64,6 +64,7 @@
 #include "test_util/testutil.h"
 #include "util/compression.h"
 #include "util/mutexlock.h"
+#include "util/random.h"
 #include "util/rate_limiter.h"
 #include "util/string_util.h"
 #include "utilities/merge_operators.h"
@@ -914,7 +915,7 @@ TEST_F(DBTest, FlushSchedule) {
     WriteOptions wo;
     // this should fill up 2 memtables
     for (int k = 0; k < 5000; ++k) {
-      ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
+      ASSERT_OK(db_->Put(wo, handles_[a & 1], rnd.RandomString(13), ""));
     }
   };
 
@@ -1171,7 +1172,7 @@ void MinLevelHelper(DBTest* self, Options& options) {
     std::vector<std::string> values;
     // Write 120KB (12 values, each 10K)
     for (int i = 0; i < 12; i++) {
-      values.push_back(DBTestBase::RandomString(&rnd, 10000));
+      values.push_back(rnd.RandomString(10000));
      ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
    }
    self->dbfull()->TEST_WaitForFlushMemTable();
@@ -1181,7 +1182,7 @@ void MinLevelHelper(DBTest* self, Options& options) {
   // generate one more file in level-0, and should trigger level-0 compaction
   std::vector<std::string> values;
   for (int i = 0; i < 12; i++) {
-    values.push_back(DBTestBase::RandomString(&rnd, 10000));
+    values.push_back(rnd.RandomString(10000));
     ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
   }
   self->dbfull()->TEST_WaitForCompact();
@@ -1294,7 +1295,7 @@ TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) {
 
   Random rnd(301);
   std::string value =
-      RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
+      rnd.RandomString(static_cast<int>(2 * options.write_buffer_size));
   for (int i = 0; i < 5 * kMaxFiles; i++) {
     ASSERT_OK(Put(1, "key", value));
     ASSERT_LE(TotalTableFiles(1), kMaxFiles);
@@ -1370,7 +1371,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   const int N = 128;
   Random rnd(301);
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
   }
 
   uint64_t size;
@@ -1394,7 +1395,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   ASSERT_EQ(size, 0);
 
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
   }
 
   start = Key(500);
@@ -1426,7 +1427,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   RandomShuffle(std::begin(keys), std::end(keys), rnd.Next());
 
   for (int i = 0; i < N * 3; i++) {
-    ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(keys[i] + 1000), rnd.RandomString(1024)));
   }
 
   start = Key(100);
@@ -1460,7 +1461,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   Flush();
 
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(i + 1000), rnd.RandomString(1024)));
   }
 
   start = Key(1050);
@@ -1508,7 +1509,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
   const int N = 64000;
   Random rnd(301);
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 24)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
   }
   // Flush everything to files
   Flush();
@@ -1517,7 +1518,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
 
   // Write more keys
   for (int i = N; i < (N + N / 4); i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 24)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
   }
   // Flush everything to files again
   Flush();
@@ -1576,7 +1577,7 @@ TEST_F(DBTest, GetApproximateMemTableStats) {
   const int N = 128;
   Random rnd(301);
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
   }
 
   uint64_t count;
@@ -1608,7 +1609,7 @@ TEST_F(DBTest, GetApproximateMemTableStats) {
   ASSERT_EQ(size, 0);
 
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
+    ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
   }
 
   start = Key(100);
@@ -1639,7 +1640,7 @@ TEST_F(DBTest, ApproximateSizes) {
   static const int S2 = 105000;  // Allow some expansion from metadata
   Random rnd(301);
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1)));
+    ASSERT_OK(Put(1, Key(i), rnd.RandomString(S1)));
   }
 
   // 0 because GetApproximateSizes() does not account for memtable space
@@ -1682,15 +1683,15 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
   CreateAndReopenWithCF({"pikachu"}, options);
 
   Random rnd(301);
-  std::string big1 = RandomString(&rnd, 100000);
-  ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000)));
-  ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000)));
+  std::string big1 = rnd.RandomString(100000);
+  ASSERT_OK(Put(1, Key(0), rnd.RandomString(10000)));
+  ASSERT_OK(Put(1, Key(1), rnd.RandomString(10000)));
   ASSERT_OK(Put(1, Key(2), big1));
-  ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000)));
+  ASSERT_OK(Put(1, Key(3), rnd.RandomString(10000)));
   ASSERT_OK(Put(1, Key(4), big1));
-  ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000)));
-  ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000)));
-  ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000)));
+  ASSERT_OK(Put(1, Key(5), rnd.RandomString(10000)));
+  ASSERT_OK(Put(1, Key(6), rnd.RandomString(300000)));
+  ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000)));
 
   // Check sizes across recovery by reopening a few times
|
||||||
for (int run = 0; run < 3; run++) {
|
for (int run = 0; run < 3; run++) {
|
||||||
@ -1797,7 +1798,7 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
FillLevels("a", "z", 1);
|
FillLevels("a", "z", 1);
|
||||||
|
|
||||||
std::string big = RandomString(&rnd, 50000);
|
std::string big = rnd.RandomString(50000);
|
||||||
Put(1, "foo", big);
|
Put(1, "foo", big);
|
||||||
Put(1, "pastfoo", "v");
|
Put(1, "pastfoo", "v");
|
||||||
const Snapshot* snapshot = db_->GetSnapshot();
|
const Snapshot* snapshot = db_->GetSnapshot();
|
||||||
@ -2187,7 +2188,7 @@ TEST_F(DBTest, SnapshotFiles) {
|
|||||||
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
|
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
|
||||||
std::vector<std::string> values;
|
std::vector<std::string> values;
|
||||||
for (int i = 0; i < 80; i++) {
|
for (int i = 0; i < 80; i++) {
|
||||||
values.push_back(RandomString(&rnd, 100000));
|
values.push_back(rnd.RandomString(100000));
|
||||||
ASSERT_OK(Put((i < 40), Key(i), values[i]));
|
ASSERT_OK(Put((i < 40), Key(i), values[i]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2240,7 +2241,7 @@ TEST_F(DBTest, SnapshotFiles) {
|
|||||||
// overwrite one key, this key should not appear in the snapshot
|
// overwrite one key, this key should not appear in the snapshot
|
||||||
std::vector<std::string> extras;
|
std::vector<std::string> extras;
|
||||||
for (unsigned int i = 0; i < 1; i++) {
|
for (unsigned int i = 0; i < 1; i++) {
|
||||||
extras.push_back(RandomString(&rnd, 100000));
|
extras.push_back(rnd.RandomString(100000));
|
||||||
ASSERT_OK(Put(0, Key(i), extras[i]));
|
ASSERT_OK(Put(0, Key(i), extras[i]));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3206,8 +3207,8 @@ TEST_P(DBTestRandomized, Randomized) {
|
|||||||
}
|
}
|
||||||
if (p < 45) { // Put
|
if (p < 45) { // Put
|
||||||
k = RandomKey(&rnd, minimum);
|
k = RandomKey(&rnd, minimum);
|
||||||
v = RandomString(&rnd,
|
v = rnd.RandomString(rnd.OneIn(20) ? 100 + rnd.Uniform(100)
|
||||||
rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
|
: rnd.Uniform(8));
|
||||||
ASSERT_OK(model.Put(WriteOptions(), k, v));
|
ASSERT_OK(model.Put(WriteOptions(), k, v));
|
||||||
ASSERT_OK(db_->Put(WriteOptions(), k, v));
|
ASSERT_OK(db_->Put(WriteOptions(), k, v));
|
||||||
} else if (p < 90) { // Delete
|
} else if (p < 90) { // Delete
|
||||||
@ -3225,7 +3226,7 @@ TEST_P(DBTestRandomized, Randomized) {
|
|||||||
// we have multiple entries in the write batch for the same key
|
// we have multiple entries in the write batch for the same key
|
||||||
}
|
}
|
||||||
if (rnd.OneIn(2)) {
|
if (rnd.OneIn(2)) {
|
||||||
v = RandomString(&rnd, rnd.Uniform(10));
|
v = rnd.RandomString(rnd.Uniform(10));
|
||||||
b.Put(k, v);
|
b.Put(k, v);
|
||||||
} else {
|
} else {
|
||||||
b.Delete(k);
|
b.Delete(k);
|
||||||
@ -3395,7 +3396,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
for (int j = 0; j < 110; ++j) {
|
for (int j = 0; j < 110; ++j) {
|
||||||
ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
// flush should happen here
|
// flush should happen here
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
|
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
|
||||||
@ -3433,7 +3434,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3444,7 +3445,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3474,9 +3475,9 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Each file contains a different key which will be dropped later.
|
// Each file contains a different key which will be dropped later.
|
||||||
ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
|
ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Put("key" + ToString(i), ""));
|
ASSERT_OK(Put("key" + ToString(i), ""));
|
||||||
ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
|
ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
}
|
}
|
||||||
@ -3486,9 +3487,9 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
|
|||||||
}
|
}
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Each file contains a different key which will be dropped later.
|
// Each file contains a different key which will be dropped later.
|
||||||
ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
|
ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
|
||||||
ASSERT_OK(Delete("key" + ToString(i)));
|
ASSERT_OK(Delete("key" + ToString(i)));
|
||||||
ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
|
ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
}
|
}
|
||||||
@ -3558,7 +3559,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3593,7 +3594,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3609,7 +3610,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
// Create 1 more file to trigger TTL compaction. The old files are dropped.
|
// Create 1 more file to trigger TTL compaction. The old files are dropped.
|
||||||
for (int i = 0; i < 1; i++) {
|
for (int i = 0; i < 1; i++) {
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -3635,7 +3636,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3650,7 +3651,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
|
|
||||||
for (int i = 0; i < 5; i++) {
|
for (int i = 0; i < 5; i++) {
|
||||||
for (int j = 0; j < 140; j++) {
|
for (int j = 0; j < 140; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3673,7 +3674,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
// Generate and flush a file about 10KB.
|
// Generate and flush a file about 10KB.
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3692,7 +3693,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
// Create 10 more files. The old 5 files are dropped as their ttl expired.
|
// Create 10 more files. The old 5 files are dropped as their ttl expired.
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3717,7 +3718,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3728,7 +3729,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
|
|||||||
for (int i = 0; i < 60; i++) {
|
for (int i = 0; i < 60; i++) {
|
||||||
// Generate and flush a file about 20KB.
|
// Generate and flush a file about 20KB.
|
||||||
for (int j = 0; j < 20; j++) {
|
for (int j = 0; j < 20; j++) {
|
||||||
ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
|
ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
||||||
@ -3771,8 +3772,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) {
|
|||||||
uint64_t start = env_->NowMicros();
|
uint64_t start = env_->NowMicros();
|
||||||
// Write ~96M data
|
// Write ~96M data
|
||||||
for (int64_t i = 0; i < (96 << 10); ++i) {
|
for (int64_t i = 0; i < (96 << 10); ++i) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
|
||||||
Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
|
|
||||||
}
|
}
|
||||||
uint64_t elapsed = env_->NowMicros() - start;
|
uint64_t elapsed = env_->NowMicros() - start;
|
||||||
double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
|
double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
|
||||||
@ -3790,8 +3790,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) {
|
|||||||
start = env_->NowMicros();
|
start = env_->NowMicros();
|
||||||
// Write ~96M data
|
// Write ~96M data
|
||||||
for (int64_t i = 0; i < (96 << 10); ++i) {
|
for (int64_t i = 0; i < (96 << 10); ++i) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
|
||||||
Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
|
|
||||||
}
|
}
|
||||||
rate_limiter_drains =
|
rate_limiter_drains =
|
||||||
TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
|
TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
|
||||||
@ -3816,8 +3815,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) {
|
|||||||
start = env_->NowMicros();
|
start = env_->NowMicros();
|
||||||
// Write ~96M data
|
// Write ~96M data
|
||||||
for (int64_t i = 0; i < (96 << 10); ++i) {
|
for (int64_t i = 0; i < (96 << 10); ++i) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
|
||||||
Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
|
|
||||||
}
|
}
|
||||||
elapsed = env_->NowMicros() - start;
|
elapsed = env_->NowMicros() - start;
|
||||||
rate_limiter_drains =
|
rate_limiter_drains =
|
||||||
@ -4018,7 +4016,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
|
|||||||
const int kNumPutsBeforeWaitForFlush = 64;
|
const int kNumPutsBeforeWaitForFlush = 64;
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < size; i++) {
|
for (int i = 0; i < size; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
|
||||||
|
|
||||||
// The following condition prevents a race condition between flush jobs
|
// The following condition prevents a race condition between flush jobs
|
||||||
// acquiring work and this thread filling up multiple memtables. Without
|
// acquiring work and this thread filling up multiple memtables. Without
|
||||||
@ -4092,7 +4090,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
|
|||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
|
|
||||||
while (!sleeping_task_low.WokenUp() && count < 256) {
|
while (!sleeping_task_low.WokenUp() && count < 256) {
|
||||||
ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
|
ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
|
||||||
count++;
|
count++;
|
||||||
}
|
}
|
||||||
ASSERT_GT(static_cast<double>(count), 128 * 0.8);
|
ASSERT_GT(static_cast<double>(count), 128 * 0.8);
|
||||||
@ -4112,7 +4110,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
|
|||||||
Env::Priority::LOW);
|
Env::Priority::LOW);
|
||||||
count = 0;
|
count = 0;
|
||||||
while (!sleeping_task_low.WokenUp() && count < 1024) {
|
while (!sleeping_task_low.WokenUp() && count < 1024) {
|
||||||
ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
|
ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
|
||||||
count++;
|
count++;
|
||||||
}
|
}
|
||||||
// Windows fails this test. Will tune in the future and figure out
|
// Windows fails this test. Will tune in the future and figure out
|
||||||
@ -4136,7 +4134,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
|
|||||||
|
|
||||||
count = 0;
|
count = 0;
|
||||||
while (!sleeping_task_low.WokenUp() && count < 1024) {
|
while (!sleeping_task_low.WokenUp() && count < 1024) {
|
||||||
ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
|
ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
|
||||||
count++;
|
count++;
|
||||||
}
|
}
|
||||||
// Windows fails this test. Will tune in the future and figure out
|
// Windows fails this test. Will tune in the future and figure out
|
||||||
@ -4323,7 +4321,7 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
|
|||||||
for (int file = 0; file < kNumL0Files; ++file) {
|
for (int file = 0; file < kNumL0Files; ++file) {
|
||||||
for (int key = 0; key < kEntriesPerBuffer; ++key) {
|
for (int key = 0; key < kEntriesPerBuffer; ++key) {
|
||||||
ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
|
ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
|
||||||
RandomString(&rnd, kTestValueSize)));
|
rnd.RandomString(kTestValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -4471,7 +4469,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
|
|||||||
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
|
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
|
||||||
for (int file = 0; file < 16 * kNumL0Files; ++file) {
|
for (int file = 0; file < 16 * kNumL0Files; ++file) {
|
||||||
for (int k = 0; k < kEntriesPerBuffer; ++k) {
|
for (int k = 0; k < kEntriesPerBuffer; ++k) {
|
||||||
ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
|
ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
|
||||||
}
|
}
|
||||||
|
|
||||||
Status s = env_->GetThreadList(&thread_list);
|
Status s = env_->GetThreadList(&thread_list);
|
||||||
@ -4558,7 +4556,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
|
|||||||
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
|
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
|
||||||
for (int file = 0; file < 16 * kNumL0Files; ++file) {
|
for (int file = 0; file < 16 * kNumL0Files; ++file) {
|
||||||
for (int k = 0; k < kEntriesPerBuffer; ++k) {
|
for (int k = 0; k < kEntriesPerBuffer; ++k) {
|
||||||
ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
|
ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
|
||||||
}
|
}
|
||||||
|
|
||||||
Status s = env_->GetThreadList(&thread_list);
|
Status s = env_->GetThreadList(&thread_list);
|
||||||
@ -4744,7 +4742,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
|
|||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
|
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
std::string value = RandomString(&rnd, 200);
|
std::string value = rnd.RandomString(200);
|
||||||
ASSERT_OK(Put(Key(keys[i]), value));
|
ASSERT_OK(Put(Key(keys[i]), value));
|
||||||
if (i % 25 == 24) {
|
if (i % 25 == 24) {
|
||||||
Flush();
|
Flush();
|
||||||
@ -4789,7 +4787,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
|
|||||||
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
|
|
||||||
for (int i = 101; i < 500; i++) {
|
for (int i = 101; i < 500; i++) {
|
||||||
std::string value = RandomString(&rnd, 200);
|
std::string value = rnd.RandomString(200);
|
||||||
ASSERT_OK(Put(Key(keys[i]), value));
|
ASSERT_OK(Put(Key(keys[i]), value));
|
||||||
if (i % 100 == 99) {
|
if (i % 100 == 99) {
|
||||||
Flush();
|
Flush();
|
||||||
@ -4841,7 +4839,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
|
|||||||
auto gen_l0_kb = [this](int start, int size, int stride) {
|
auto gen_l0_kb = [this](int start, int size, int stride) {
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < size; i++) {
|
for (int i = 0; i < size; i++) {
|
||||||
ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(start + stride * i), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
};
|
};
|
||||||
@ -4936,7 +4934,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
WriteOptions wo;
|
WriteOptions wo;
|
||||||
while (count < 64) {
|
while (count < 64) {
|
||||||
ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
|
ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
|
||||||
dbfull()->TEST_FlushMemTable(true, true);
|
dbfull()->TEST_FlushMemTable(true, true);
|
||||||
count++;
|
count++;
|
||||||
if (dbfull()->TEST_write_controler().IsStopped()) {
|
if (dbfull()->TEST_write_controler().IsStopped()) {
|
||||||
@ -4964,7 +4962,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
|
|||||||
sleeping_task_low.WaitUntilSleeping();
|
sleeping_task_low.WaitUntilSleeping();
|
||||||
count = 0;
|
count = 0;
|
||||||
while (count < 64) {
|
while (count < 64) {
|
||||||
ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
|
ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
|
||||||
dbfull()->TEST_FlushMemTable(true, true);
|
dbfull()->TEST_FlushMemTable(true, true);
|
||||||
count++;
|
count++;
|
||||||
if (dbfull()->TEST_write_controler().IsStopped()) {
|
if (dbfull()->TEST_write_controler().IsStopped()) {
|
||||||
@ -4986,7 +4984,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
|
|||||||
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
||||||
|
|
||||||
for (int i = 0; i < 4; ++i) {
|
for (int i = 0; i < 4; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
|
||||||
// Wait for compaction so that put won't stop
|
// Wait for compaction so that put won't stop
|
||||||
dbfull()->TEST_FlushMemTable(true);
|
dbfull()->TEST_FlushMemTable(true);
|
||||||
}
|
}
|
||||||
@ -5000,7 +4998,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
|
|||||||
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
||||||
|
|
||||||
for (int i = 0; i < 4; ++i) {
|
for (int i = 0; i < 4; ++i) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
|
||||||
// Wait for compaction so that put won't stop
|
// Wait for compaction so that put won't stop
|
||||||
dbfull()->TEST_FlushMemTable(true);
|
dbfull()->TEST_FlushMemTable(true);
|
||||||
}
|
}
|
||||||
@ -5174,7 +5172,7 @@ TEST_F(DBTest, FileCreationRandomFailure) {
|
|||||||
}
|
}
|
||||||
for (int k = 0; k < kTestSize; ++k) {
|
for (int k = 0; k < kTestSize; ++k) {
|
||||||
// here we expect some of the Put fails.
|
// here we expect some of the Put fails.
|
||||||
std::string value = RandomString(&rnd, 100);
|
std::string value = rnd.RandomString(100);
|
||||||
Status s = Put(Key(k), Slice(value));
|
Status s = Put(Key(k), Slice(value));
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
// update the latest successful put
|
// update the latest successful put
|
||||||
@ -5223,11 +5221,11 @@ TEST_F(DBTest, DynamicMiscOptions) {
|
|||||||
int key1 = key_start + 1;
|
int key1 = key_start + 1;
|
||||||
int key2 = key_start + 2;
|
int key2 = key_start + 2;
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8)));
|
ASSERT_OK(Put(Key(key0), rnd.RandomString(8)));
|
||||||
for (int i = 0; i < 10; ++i) {
|
for (int i = 0; i < 10; ++i) {
|
||||||
ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8)));
|
ASSERT_OK(Put(Key(key1), rnd.RandomString(8)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8)));
|
ASSERT_OK(Put(Key(key2), rnd.RandomString(8)));
|
||||||
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
|
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
|
||||||
iter->Seek(Key(key1));
|
iter->Seek(Key(key1));
|
||||||
ASSERT_TRUE(iter->Valid());
|
ASSERT_TRUE(iter->Valid());
|
||||||
@ -5367,7 +5365,7 @@ TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < kNumKeysWritten; ++i) {
|
for (int i = 0; i < kNumKeysWritten; ++i) {
|
||||||
// compressible string
|
// compressible string
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
|
||||||
}
|
}
|
||||||
|
|
||||||
table_options.format_version = first_table_version == 1 ? 2 : 1;
|
table_options.format_version = first_table_version == 1 ? 2 : 1;
|
||||||
@ -5712,7 +5710,7 @@ TEST_F(DBTest, PromoteL0) {
|
|||||||
std::map<int32_t, std::string> values;
|
std::map<int32_t, std::string> values;
|
||||||
for (const auto& range : ranges) {
|
for (const auto& range : ranges) {
|
||||||
for (int32_t j = range.first; j < range.second; j++) {
|
for (int32_t j = range.first; j < range.second; j++) {
|
||||||
values[j] = RandomString(&rnd, value_size);
|
values[j] = rnd.RandomString(value_size);
|
||||||
ASSERT_OK(Put(Key(j), values[j]));
|
ASSERT_OK(Put(Key(j), values[j]));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
@ -5773,7 +5771,7 @@ TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) {
|
|||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < kNumL0Files; ++i) {
|
for (int i = 0; i < kNumL0Files; ++i) {
|
||||||
ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(0), rnd.RandomString(1024)));
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
|
ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
|
||||||
@ -5812,7 +5810,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
|
|||||||
for (int i = 0; i < 2; ++i) {
|
for (int i = 0; i < 2; ++i) {
|
||||||
// put two keys to ensure no trivial move
|
// put two keys to ensure no trivial move
|
||||||
for (int j = 0; j < 2; ++j) {
|
for (int j = 0; j < 2; ++j) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -5826,7 +5824,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
|
|||||||
for (int i = 0; i < kNumL0Files; ++i) {
|
for (int i = 0; i < kNumL0Files; ++i) {
|
||||||
// put two keys to ensure no trivial move
|
// put two keys to ensure no trivial move
|
||||||
for (int j = 0; j < 2; ++j) {
|
for (int j = 0; j < 2; ++j) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -5855,7 +5853,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
|
|||||||
for (int i = 0; i < 2; ++i) {
|
for (int i = 0; i < 2; ++i) {
|
||||||
// put two keys to ensure no trivial move
|
// put two keys to ensure no trivial move
|
||||||
for (int j = 0; j < 2; ++j) {
|
for (int j = 0; j < 2; ++j) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -5885,7 +5883,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
|
|||||||
// generate enough files to trigger compaction
|
// generate enough files to trigger compaction
|
||||||
for (int i = 0; i < 20; ++i) {
|
for (int i = 0; i < 20; ++i) {
|
||||||
for (int j = 0; j < 2; ++j) {
|
for (int j = 0; j < 2; ++j) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -6496,7 +6494,7 @@ TEST_F(DBTest, PauseBackgroundWorkTest) {
|
|||||||
threads.emplace_back([&]() {
|
threads.emplace_back([&]() {
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 10000; ++i) {
|
for (int i = 0; i < 10000; ++i) {
|
||||||
Put(RandomString(&rnd, 10), RandomString(&rnd, 10));
|
Put(rnd.RandomString(10), rnd.RandomString(10));
|
||||||
}
|
}
|
||||||
done.store(true);
|
done.store(true);
|
||||||
});
|
});
|
||||||
@ -6626,7 +6624,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -6651,7 +6649,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) {
|
|||||||
for (int i = 0; i < kNumLevelFiles; ++i) {
|
for (int i = 0; i < kNumLevelFiles; ++i) {
|
||||||
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
for (int j = 0; j < kNumKeysPerFile; ++j) {
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
|
Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
|
@ -16,7 +16,8 @@
|
|||||||
#include "port/stack_trace.h"
|
#include "port/stack_trace.h"
|
||||||
#include "rocksdb/persistent_cache.h"
|
#include "rocksdb/persistent_cache.h"
|
||||||
#include "rocksdb/wal_filter.h"
|
#include "rocksdb/wal_filter.h"
|
||||||
#include "test_util/fault_injection_test_env.h"
|
#include "util/random.h"
|
||||||
|
#include "utilities/fault_injection_env.h"
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
|
|
||||||
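The hunk above swaps the fault-injection header from its old test_util/ location to utilities/ and adds util/random.h for the rnd.RandomString() calls that follow. A hedged sketch of the resulting include preamble for a test that needs both (only the paths come from the + lines; the FaultInjectionTestEnv class name is assumed to be unchanged by the relocation):

    // Sketch: includes after the relocation.
    #include "util/random.h"                    // provides Random::RandomString used below
    #include "utilities/fault_injection_env.h"  // was: test_util/fault_injection_test_env.h

    // Usage is assumed unchanged; only the header path moved:
    // ROCKSDB_NAMESPACE::FaultInjectionTestEnv fault_env(ROCKSDB_NAMESPACE::Env::Default());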
@ -166,7 +167,7 @@ TEST_F(DBTest2, PartitionedIndexUserToInternalKey) {
|
|||||||
|
|
||||||
for (int i = 0; i < 3000; i++) {
|
for (int i = 0; i < 3000; i++) {
|
||||||
int j = i % 30;
|
int j = i % 30;
|
||||||
std::string value = RandomString(&rnd, 10500);
|
std::string value = rnd.RandomString(10500);
|
||||||
ASSERT_OK(Put("keykey_" + std::to_string(j), value));
|
ASSERT_OK(Put("keykey_" + std::to_string(j), value));
|
||||||
snapshots.push_back(db_->GetSnapshot());
|
snapshots.push_back(db_->GetSnapshot());
|
||||||
}
|
}
|
||||||
@ -1274,7 +1275,7 @@ TEST_F(DBTest2, PresetCompressionDict) {
|
|||||||
std::string seq_datas[10];
|
std::string seq_datas[10];
|
||||||
for (int j = 0; j < 10; ++j) {
|
for (int j = 0; j < 10; ++j) {
|
||||||
seq_datas[j] =
|
seq_datas[j] =
|
||||||
RandomString(&rnd, kBlockSizeBytes - kApproxPerBlockOverheadBytes);
|
rnd.RandomString(kBlockSizeBytes - kApproxPerBlockOverheadBytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
|
ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
|
||||||
@ -1349,7 +1350,7 @@ TEST_F(DBTest2, PresetCompressionDictLocality) {
|
|||||||
for (int i = 0; i < kNumFiles; ++i) {
|
for (int i = 0; i < kNumFiles; ++i) {
|
||||||
for (int j = 0; j < kNumEntriesPerFile; ++j) {
|
for (int j = 0; j < kNumEntriesPerFile; ++j) {
|
||||||
ASSERT_OK(Put(Key(i * kNumEntriesPerFile + j),
|
ASSERT_OK(Put(Key(i * kNumEntriesPerFile + j),
|
||||||
RandomString(&rnd, kNumBytesPerEntry)));
|
rnd.RandomString(kNumBytesPerEntry)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
MoveFilesToLevel(1);
|
MoveFilesToLevel(1);
|
||||||
@ -1519,9 +1520,9 @@ TEST_P(CompressionFailuresTest, CompressionFailures) {
|
|||||||
// Write 10 random files
|
// Write 10 random files
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
for (int j = 0; j < 5; j++) {
|
for (int j = 0; j < 5; j++) {
|
||||||
std::string key = RandomString(&rnd, kKeySize);
|
std::string key = rnd.RandomString(kKeySize);
|
||||||
// Ensure good compression ratio
|
// Ensure good compression ratio
|
||||||
std::string valueUnit = RandomString(&rnd, kValUnitSize);
|
std::string valueUnit = rnd.RandomString(kValUnitSize);
|
||||||
std::string value;
|
std::string value;
|
||||||
for (int k = 0; k < kValSize; k += kValUnitSize) {
|
for (int k = 0; k < kValSize; k += kValUnitSize) {
|
||||||
value += valueUnit;
|
value += valueUnit;
|
||||||
@ -1623,8 +1624,8 @@ TEST_F(DBTest2, CompressionOptions) {
|
|||||||
// Write 10 random files
|
// Write 10 random files
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
for (int j = 0; j < 5; j++) {
|
for (int j = 0; j < 5; j++) {
|
||||||
std::string key = RandomString(&rnd, kKeySize);
|
std::string key = rnd.RandomString(kKeySize);
|
||||||
std::string value = RandomString(&rnd, kValSize);
|
std::string value = rnd.RandomString(kValSize);
|
||||||
key_value_written[key] = value;
|
key_value_written[key] = value;
|
||||||
ASSERT_OK(Put(key, value));
|
ASSERT_OK(Put(key, value));
|
||||||
}
|
}
|
||||||
@ -1696,7 +1697,7 @@ TEST_F(DBTest2, CompactionStall) {
|
|||||||
// 4 Files in L0
|
// 4 Files in L0
|
||||||
for (int i = 0; i < 4; i++) {
|
for (int i = 0; i < 4; i++) {
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
|
ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -1711,7 +1712,7 @@ TEST_F(DBTest2, CompactionStall) {
|
|||||||
// Another 6 L0 files to trigger compaction again
|
// Another 6 L0 files to trigger compaction again
|
||||||
for (int i = 0; i < 6; i++) {
|
for (int i = 0; i < 6; i++) {
|
||||||
for (int j = 0; j < 10; j++) {
|
for (int j = 0; j < 10; j++) {
|
||||||
ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
|
ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -2311,7 +2312,7 @@ TEST_F(DBTest2, PersistentCache) {
|
|||||||
std::string str;
|
std::string str;
|
||||||
for (int i = 0; i < num_iter; i++) {
|
for (int i = 0; i < num_iter; i++) {
|
||||||
if (i % 4 == 0) { // high compression ratio
|
if (i % 4 == 0) { // high compression ratio
|
||||||
str = RandomString(&rnd, 1000);
|
str = rnd.RandomString(1000);
|
||||||
}
|
}
|
||||||
values.push_back(str);
|
values.push_back(str);
|
||||||
ASSERT_OK(Put(1, Key(i), values[i]));
|
ASSERT_OK(Put(1, Key(i), values[i]));
|
||||||
@ -2409,7 +2410,7 @@ TEST_F(DBTest2, ReadAmpBitmap) {
|
|||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (size_t i = 0; i < kNumEntries; i++) {
|
for (size_t i = 0; i < kNumEntries; i++) {
|
||||||
ASSERT_OK(Put(Key(static_cast<int>(i)), RandomString(&rnd, 100)));
|
ASSERT_OK(Put(Key(static_cast<int>(i)), rnd.RandomString(100)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
@ -2516,7 +2517,7 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
|
|||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < kNumEntries; i++) {
|
for (int i = 0; i < kNumEntries; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
@ -2739,13 +2740,13 @@ TEST_F(DBTest2, PausingManualCompaction1) {
|
|||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
// Generate a file containing 10 keys.
|
// Generate a file containing 10 keys.
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
// Generate another file containing same keys
|
// Generate another file containing same keys
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
@ -2818,7 +2819,7 @@ TEST_F(DBTest2, PausingManualCompaction2) {
|
|||||||
for (int i = 0; i < 2; i++) {
|
for (int i = 0; i < 2; i++) {
|
||||||
// Generate a file containing 10 keys.
|
// Generate a file containing 10 keys.
|
||||||
for (int j = 0; j < 100; j++) {
|
for (int j = 0; j < 100; j++) {
|
||||||
ASSERT_OK(Put(Key(j), RandomString(&rnd, 50)));
|
ASSERT_OK(Put(Key(j), rnd.RandomString(50)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
}
|
}
|
||||||
@ -2840,7 +2841,7 @@ TEST_F(DBTest2, PausingManualCompaction3) {
|
|||||||
for (int i = 0; i < options.num_levels; i++) {
|
for (int i = 0; i < options.num_levels; i++) {
|
||||||
for (int j = 0; j < options.num_levels - i + 1; j++) {
|
for (int j = 0; j < options.num_levels - i + 1; j++) {
|
||||||
for (int k = 0; k < 1000; k++) {
|
for (int k = 0; k < 1000; k++) {
|
||||||
ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50)));
|
ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -2894,7 +2895,7 @@ TEST_F(DBTest2, PausingManualCompaction4) {
|
|||||||
for (int i = 0; i < options.num_levels; i++) {
|
for (int i = 0; i < options.num_levels; i++) {
|
||||||
for (int j = 0; j < options.num_levels - i + 1; j++) {
|
for (int j = 0; j < options.num_levels - i + 1; j++) {
|
||||||
for (int k = 0; k < 1000; k++) {
|
for (int k = 0; k < 1000; k++) {
|
||||||
ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50)));
|
ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50)));
|
||||||
}
|
}
|
||||||
Flush();
|
Flush();
|
||||||
}
|
}
|
||||||
@ -4021,7 +4022,7 @@ TEST_F(DBTest2, DISABLED_IteratorPinnedMemory) {
|
|||||||
Reopen(options);
|
Reopen(options);
|
||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::string v = RandomString(&rnd, 400);
|
std::string v = rnd.RandomString(400);
|
||||||
|
|
||||||
// Since v is the size of a block, each key should take a block
|
// Since v is the size of a block, each key should take a block
|
||||||
// of 400+ bytes.
|
// of 400+ bytes.
|
||||||
@ -4749,7 +4750,7 @@ TEST_F(DBTest2, BlockBasedTablePrefixIndexSeekForPrev) {
|
|||||||
Reopen(options);
|
Reopen(options);
|
||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::string large_value = RandomString(&rnd, 500);
|
std::string large_value = rnd.RandomString(500);
|
||||||
|
|
||||||
ASSERT_OK(Put("a1", large_value));
|
ASSERT_OK(Put("a1", large_value));
|
||||||
ASSERT_OK(Put("x1", large_value));
|
ASSERT_OK(Put("x1", large_value));
|
||||||
@ -5011,7 +5012,7 @@ TEST_F(DBTest2, AutoPrefixMode1) {
|
|||||||
Reopen(options);
|
Reopen(options);
|
||||||
|
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
std::string large_value = RandomString(&rnd, 500);
|
std::string large_value = rnd.RandomString(500);
|
||||||
|
|
||||||
ASSERT_OK(Put("a1", large_value));
|
ASSERT_OK(Put("a1", large_value));
|
||||||
ASSERT_OK(Put("x1", large_value));
|
ASSERT_OK(Put("x1", large_value));
|
||||||
|
@ -8,9 +8,11 @@
|
|||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
#include "db/db_test_util.h"
|
#include "db/db_test_util.h"
|
||||||
|
|
||||||
#include "db/forward_iterator.h"
|
#include "db/forward_iterator.h"
|
||||||
#include "rocksdb/env_encryption.h"
|
#include "rocksdb/env_encryption.h"
|
||||||
#include "rocksdb/utilities/object_registry.h"
|
#include "rocksdb/utilities/object_registry.h"
|
||||||
|
#include "util/random.h"
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
|
|
||||||
@ -408,7 +410,7 @@ Options DBTestBase::GetOptions(
|
|||||||
options.use_direct_reads = true;
|
options.use_direct_reads = true;
|
||||||
options.use_direct_io_for_flush_and_compaction = true;
|
options.use_direct_io_for_flush_and_compaction = true;
|
||||||
options.compaction_readahead_size = 2 * 1024 * 1024;
|
options.compaction_readahead_size = 2 * 1024 * 1024;
|
||||||
test::SetupSyncPointsToMockDirectIO();
|
SetupSyncPointsToMockDirectIO();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#endif // ROCKSDB_LITE
|
#endif // ROCKSDB_LITE
|
||||||
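In the GetOptions() hunk above, the test:: qualifier is dropped from the direct-I/O mock setup, so the helper now resolves without the test namespace. A small sketch of the call sequence after the change; the option fields are copied from the surrounding context lines, and the unqualified call is shown exactly as it appears on the + line:

    // Sketch: direct-I/O option setup as the hunk above now reads.
    options.use_direct_reads = true;
    options.use_direct_io_for_flush_and_compaction = true;
    options.compaction_readahead_size = 2 * 1024 * 1024;
    SetupSyncPointsToMockDirectIO();  // previously test::SetupSyncPointsToMockDirectIO()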
@ -1192,7 +1194,7 @@ int DBTestBase::GetSstFileCount(std::string path) {
|
|||||||
void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
|
void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
|
||||||
bool nowait) {
|
bool nowait) {
|
||||||
for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
|
for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
|
||||||
ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
|
ASSERT_OK(Put(cf, Key(*key_idx), rnd->RandomString((i == 99) ? 1 : 990)));
|
||||||
(*key_idx)++;
|
(*key_idx)++;
|
||||||
}
|
}
|
||||||
if (!nowait) {
|
if (!nowait) {
|
||||||
@ -1204,7 +1206,7 @@ void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
|
|||||||
// this will generate non-overlapping files since it keeps increasing key_idx
|
// this will generate non-overlapping files since it keeps increasing key_idx
|
||||||
void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
|
void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
|
||||||
for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
|
for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
|
||||||
ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
|
ASSERT_OK(Put(Key(*key_idx), rnd->RandomString((i == 99) ? 1 : 990)));
|
||||||
(*key_idx)++;
|
(*key_idx)++;
|
||||||
}
|
}
|
||||||
if (!nowait) {
|
if (!nowait) {
|
||||||
@ -1217,9 +1219,9 @@ const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51;
|
|||||||
|
|
||||||
void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
|
void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
|
||||||
for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
|
for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
|
||||||
ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
|
ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(2000)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
|
ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(200)));
|
||||||
if (!nowait) {
|
if (!nowait) {
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
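The GenerateNewFile and GenerateNewRandomFile hunks above are the pointer-flavored variant of the same substitution: when the generator is passed as Random* rnd, the old free-helper call RandomString(rnd, len) becomes a member call through the pointer. A minimal sketch (the MakeValue helper name is made up for illustration):

    // Sketch of the pointer form used by the GenerateNew*File helpers above.
    std::string MakeValue(Random* rnd, int len) {
      // Old: return RandomString(rnd, len);
      return rnd->RandomString(len);  // mirrors the rnd->RandomString(...) on the + lines
    }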
|
@ -45,7 +45,6 @@
|
|||||||
#include "test_util/mock_time_env.h"
|
#include "test_util/mock_time_env.h"
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
#include "test_util/testharness.h"
|
#include "test_util/testharness.h"
|
||||||
#include "test_util/testutil.h"
|
|
||||||
#include "util/cast_util.h"
|
#include "util/cast_util.h"
|
||||||
#include "util/compression.h"
|
#include "util/compression.h"
|
||||||
#include "util/mutexlock.h"
|
#include "util/mutexlock.h"
|
||||||
@ -876,12 +875,6 @@ class DBTestBase : public testing::Test {
|
|||||||
|
|
||||||
~DBTestBase();
|
~DBTestBase();
|
||||||
|
|
||||||
static std::string RandomString(Random* rnd, int len) {
|
|
||||||
std::string r;
|
|
||||||
test::RandomString(rnd, len, &r);
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::string Key(int i) {
|
static std::string Key(int i) {
|
||||||
char buf[100];
|
char buf[100];
|
||||||
snprintf(buf, sizeof(buf), "key%06d", i);
|
snprintf(buf, sizeof(buf), "key%06d", i);
|
||||||
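The hunk above, in the DBTestBase declaration, is the source of all of the call-site churn earlier in this diff: the static RandomString wrapper, which forwarded to test::RandomString through an out-parameter, is deleted, so callers now construct the value straight from the generator. For comparison, the deleted wrapper (copied from the - lines) next to the assumed replacement:

    // Deleted wrapper, as shown in the - lines above:
    //   static std::string RandomString(Random* rnd, int len) {
    //     std::string r;
    //     test::RandomString(rnd, len, &r);
    //     return r;
    //   }
    // Equivalent call after the removal, assuming Random::RandomString(int) returns the string:
    Random rnd(301);
    std::string r = rnd.RandomString(16);  // any length; 16 is arbitrary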
|
@ -12,6 +12,7 @@
|
|||||||
#if !defined(ROCKSDB_LITE)
|
#if !defined(ROCKSDB_LITE)
|
||||||
#include "rocksdb/utilities/table_properties_collectors.h"
|
#include "rocksdb/utilities/table_properties_collectors.h"
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
|
#include "util/random.h"
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
|
|
||||||
@ -361,7 +362,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
|
|||||||
num++) {
|
num++) {
|
||||||
// Write 110KB (11 values, each 10K)
|
// Write 110KB (11 values, each 10K)
|
||||||
for (int i = 0; i < 11; i++) {
|
for (int i = 0; i < 11; i++) {
|
||||||
ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
@ -419,7 +420,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) {
|
|||||||
num++) {
|
num++) {
|
||||||
// Write 110KB (11 values, each 10K)
|
// Write 110KB (11 values, each 10K)
|
||||||
for (int i = 0; i < 11; i++) {
|
for (int i = 0; i < 11; i++) {
|
||||||
ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
@ -498,7 +499,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) {
|
|||||||
for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
|
for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
|
||||||
// Write 110KB (11 values, each 10K)
|
// Write 110KB (11 values, each 10K)
|
||||||
for (int i = 0; i < 11; i++) {
|
for (int i = 0; i < 11; i++) {
|
||||||
ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
@ -576,7 +577,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
|
|||||||
ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
|
ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
|
for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
|
||||||
ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
|
ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize)));
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -639,17 +640,17 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
|
|||||||
// Generate 3 overlapping files
|
// Generate 3 overlapping files
|
||||||
Random rnd(301);
|
Random rnd(301);
|
||||||
for (int i = 0; i < 210; i++) {
|
for (int i = 0; i < 210; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
for (int i = 200; i < 300; i++) {
|
for (int i = 200; i < 300; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
for (int i = 250; i < 260; i++) {
|
for (int i = 250; i < 260; i++) {
|
||||||
ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
|
ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
|
||||||
}
|
}
|
||||||
ASSERT_OK(Flush());
|
ASSERT_OK(Flush());
|
||||||
|
|
||||||
@ -960,7 +961,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
|
|||||||
for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
|
for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
|
||||||
// Write 100KB (100 values, each 1K)
|
// Write 100KB (100 values, each 1K)
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
@ -998,7 +999,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
|
|||||||
num++) {
|
num++) {
|
||||||
// Write 100KB (100 values, each 1K)
|
// Write 100KB (100 values, each 1K)
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
@ -1008,7 +1009,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
|
|||||||
// Generate one more file at level-0, which should trigger level-0
|
// Generate one more file at level-0, which should trigger level-0
|
||||||
// compaction.
|
// compaction.
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -1029,7 +1030,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
|
|||||||
num++) {
|
num++) {
|
||||||
// Write 110KB (11 values, each 10K)
|
// Write 110KB (11 values, each 10K)
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForFlushMemTable();
|
dbfull()->TEST_WaitForFlushMemTable();
|
||||||
@ -1039,7 +1040,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
|
|||||||
// Generate one more file at level-0, which should trigger level-0
|
// Generate one more file at level-0, which should trigger level-0
|
||||||
// compaction.
|
// compaction.
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -1050,7 +1051,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
|
|||||||
// Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
|
// Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
|
||||||
// more file at level-0, which should trigger level-0 compaction.
|
// more file at level-0, which should trigger level-0 compaction.
|
||||||
for (int i = 0; i < 100; i++) {
|
for (int i = 0; i < 100; i++) {
|
||||||
ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
|
ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
|
||||||
key_idx++;
|
key_idx++;
|
||||||
}
|
}
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
@ -1530,7 +1531,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
|
|||||||
|
|
||||||
for (int i = 0; i <= max_key1; i++) {
|
for (int i = 0; i <= max_key1; i++) {
|
||||||
// each value is 10K
|
// each value is 10K
|
||||||
ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
}
|
}
|
||||||
@ -1548,7 +1549,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
|
|||||||
// Insert more keys
|
// Insert more keys
|
||||||
for (int i = max_key1 + 1; i <= max_key2; i++) {
|
for (int i = max_key1 + 1; i <= max_key2; i++) {
|
||||||
// each value is 10K
|
// each value is 10K
|
||||||
ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
}
|
}
|
||||||
@ -1580,7 +1581,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
|
|||||||
// Insert more keys
|
// Insert more keys
|
||||||
for (int i = max_key2 + 1; i <= max_key3; i++) {
|
for (int i = max_key2 + 1; i <= max_key3; i++) {
|
||||||
// each value is 10K
|
// each value is 10K
|
||||||
ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
|
ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
}
|
}
|
||||||
|
@@ -12,8 +12,8 @@
 #include "options/options_helper.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {
 class DBWALTestBase : public DBTestBase {
@@ -13,10 +13,10 @@
 #include "rocksdb/utilities/debug.h"
 #include "table/block_based/block_based_table_reader.h"
 #include "table/block_based/block_builder.h"
-#include "test_util/fault_injection_test_env.h"
 #if !defined(ROCKSDB_LITE)
 #include "test_util/sync_point.h"
 #endif
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {
 class DBBasicTestWithTimestampBase : public DBTestBase {
@@ -4,18 +4,20 @@
 // (found in the LICENSE.Apache file in the root directory).

 #include <atomic>
+#include <fstream>
 #include <memory>
 #include <thread>
 #include <vector>
-#include <fstream>
+
 #include "db/db_test_util.h"
 #include "db/write_batch_internal.h"
 #include "db/write_thread.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
+#include "util/random.h"
 #include "util/string_util.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {

@@ -246,7 +248,7 @@ TEST_P(DBWriteTest, IOErrorOnSwitchMemtable) {
 mock_env->SetFilesystemActive(false, Status::IOError("Not active"));
 Status s;
 for (int i = 0; i < 4 * 512; ++i) {
-s = Put(Key(i), RandomString(&rnd, 1024));
+s = Put(Key(i), rnd.RandomString(1024));
 if (!s.ok()) {
 break;
 }
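The include swap above changes only the header path; the fault-injection Env API used by the test stays the same. A hedged sketch of the wrapped-Env pattern under the new path (SimulateInactiveFilesystem is illustrative, not from the tree):

#include <memory>

#include "rocksdb/env.h"
#include "utilities/fault_injection_env.h"  // was test_util/fault_injection_test_env.h

// Illustrative: wrap the default Env and simulate an inactive filesystem, the
// same way the IOErrorOnSwitchMemtable hunk above uses mock_env.
void SimulateInactiveFilesystem() {
  std::unique_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestEnv> fault_env(
      new ROCKSDB_NAMESPACE::FaultInjectionTestEnv(ROCKSDB_NAMESPACE::Env::Default()));
  fault_env->SetFilesystemActive(false,
                                 ROCKSDB_NAMESPACE::Status::IOError("Not active"));
  // ... writes issued through fault_env are expected to fail here ...
  fault_env->SetFilesystemActive(true);
}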
@@ -13,11 +13,12 @@
 #include "rocksdb/io_status.h"
 #include "rocksdb/perf_context.h"
 #include "rocksdb/sst_file_manager.h"
-#include "test_util/fault_injection_test_env.h"
-#include "test_util/fault_injection_test_fs.h"
 #if !defined(ROCKSDB_LITE)
 #include "test_util/sync_point.h"
 #endif
+#include "util/random.h"
+#include "utilities/fault_injection_env.h"
+#include "utilities/fault_injection_fs.h"

 namespace ROCKSDB_NAMESPACE {

@@ -744,7 +745,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteError) {
 WriteBatch batch;

 for (auto i = 0; i < 100; ++i) {
-batch.Put(Key(i), RandomString(&rnd, 1024));
+batch.Put(Key(i), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -757,7 +758,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteError) {
 int write_error = 0;

 for (auto i = 100; i < 199; ++i) {
-batch.Put(Key(i), RandomString(&rnd, 1024));
+batch.Put(Key(i), rnd.RandomString(1024));
 }

 SyncPoint::GetInstance()->SetCallBack(
@@ -820,7 +821,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
 WriteBatch batch;

 for (auto i = 0; i < 100; ++i) {
-batch.Put(Key(i), RandomString(&rnd, 1024));
+batch.Put(Key(i), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -835,7 +836,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
 int write_error = 0;

 for (auto i = 100; i < 200; ++i) {
-batch.Put(Key(i), RandomString(&rnd, 1024));
+batch.Put(Key(i), rnd.RandomString(1024));
 }

 SyncPoint::GetInstance()->SetCallBack(
@@ -871,7 +872,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
 WriteBatch batch;

 for (auto i = 200; i < 300; ++i) {
-batch.Put(Key(i), RandomString(&rnd, 1024));
+batch.Put(Key(i), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -912,7 +913,7 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) {

 for (auto i = 1; i < 4; ++i) {
 for (auto j = 0; j < 100; ++j) {
-batch.Put(handles_[i], Key(j), RandomString(&rnd, 1024));
+batch.Put(handles_[i], Key(j), rnd.RandomString(1024));
 }
 }

@@ -927,7 +928,7 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) {

 // Write to one CF
 for (auto i = 100; i < 199; ++i) {
-batch.Put(handles_[2], Key(i), RandomString(&rnd, 1024));
+batch.Put(handles_[2], Key(i), rnd.RandomString(1024));
 }

 SyncPoint::GetInstance()->SetCallBack(
@@ -1016,7 +1017,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
 WriteBatch batch;

 for (auto j = 0; j <= 100; ++j) {
-batch.Put(Key(j), RandomString(&rnd, 1024));
+batch.Put(Key(j), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -1031,7 +1032,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {

 // Write to one CF
 for (auto j = 100; j < 199; ++j) {
-batch.Put(Key(j), RandomString(&rnd, 1024));
+batch.Put(Key(j), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -1129,7 +1130,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
 WriteBatch batch;

 for (auto j = 0; j <= 100; ++j) {
-batch.Put(Key(j), RandomString(&rnd, 1024));
+batch.Put(Key(j), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -1144,7 +1145,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {

 // Write to one CF
 for (auto j = 100; j < 199; ++j) {
-batch.Put(Key(j), RandomString(&rnd, 1024));
+batch.Put(Key(j), rnd.RandomString(1024));
 }

 WriteOptions wopts;
@@ -10,8 +10,9 @@
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/sst_file_writer.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {

@@ -27,7 +28,7 @@ class ExternalSSTFileBasicTest
 }

 void DestroyAndRecreateExternalSSTFilesDir() {
-test::DestroyDir(env_, sst_files_dir_);
+DestroyDir(env_, sst_files_dir_);
 env_->CreateDir(sst_files_dir_);
 }

@@ -160,9 +161,7 @@ class ExternalSSTFileBasicTest
 write_global_seqno, verify_checksums_before_ingest, true_data);
 }

-~ExternalSSTFileBasicTest() override {
-test::DestroyDir(env_, sst_files_dir_);
-}
+~ExternalSSTFileBasicTest() override { DestroyDir(env_, sst_files_dir_); }

 protected:
 std::string sst_files_dir_;
@@ -1147,7 +1146,7 @@ TEST_F(ExternalSSTFileBasicTest, VerifyChecksumReadahead) {
 std::string file_name = sst_files_dir_ + "verify_checksum_readahead_test.sst";
 ASSERT_OK(sst_file_writer->Open(file_name));
 Random rnd(301);
-std::string value = DBTestBase::RandomString(&rnd, 4000);
+std::string value = rnd.RandomString(4000);
 for (int i = 0; i < 5000; i++) {
 ASSERT_OK(sst_file_writer->Put(DBTestBase::Key(i), value));
 }
@@ -14,8 +14,9 @@
 #include "port/stack_trace.h"
 #include "rocksdb/sst_file_reader.h"
 #include "rocksdb/sst_file_writer.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {

@@ -46,7 +47,7 @@ class ExternSSTFileLinkFailFallbackTest
 : DBTestBase("/external_sst_file_test"),
 test_env_(new ExternalSSTTestEnv(env_, true)) {
 sst_files_dir_ = dbname_ + "/sst_files/";
-test::DestroyDir(env_, sst_files_dir_);
+DestroyDir(env_, sst_files_dir_);
 env_->CreateDir(sst_files_dir_);
 options_ = CurrentOptions();
 options_.disable_auto_compactions = true;
@@ -77,7 +78,7 @@ class ExternalSSTFileTest
 }

 void DestroyAndRecreateExternalSSTFilesDir() {
-test::DestroyDir(env_, sst_files_dir_);
+DestroyDir(env_, sst_files_dir_);
 env_->CreateDir(sst_files_dir_);
 }

@@ -280,7 +281,7 @@ class ExternalSSTFileTest
 return db_->IngestExternalFile(files, opts);
 }

-~ExternalSSTFileTest() override { test::DestroyDir(env_, sst_files_dir_); }
+~ExternalSSTFileTest() override { DestroyDir(env_, sst_files_dir_); }

 protected:
 int last_file_id_ = 0;
@@ -1751,10 +1752,8 @@ TEST_P(ExternalSSTFileTest, IngestFileWithGlobalSeqnoRandomized) {
 for (int i = 0; i < 500; i++) {
 std::vector<std::pair<std::string, std::string>> random_data;
 for (int j = 0; j < 100; j++) {
-std::string k;
-std::string v;
-test::RandomString(&rnd, rnd.Next() % 20, &k);
-test::RandomString(&rnd, rnd.Next() % 50, &v);
+std::string k = rnd.RandomString(rnd.Next() % 20);
+std::string v = rnd.RandomString(rnd.Next() % 50);
 random_data.emplace_back(k, v);
 }

@@ -2388,8 +2387,7 @@ TEST_F(ExternalSSTFileTest, IngestFileWrittenWithCompressionDictionary) {
 Random rnd(301);
 std::vector<std::pair<std::string, std::string>> random_data;
 for (int i = 0; i < kNumEntries; i++) {
-std::string val;
-test::RandomString(&rnd, kNumBytesPerEntry, &val);
+std::string val = rnd.RandomString(kNumBytesPerEntry);
 random_data.emplace_back(Key(i), std::move(val));
 }
 ASSERT_OK(GenerateAndAddExternalFile(options, std::move(random_data)));
@@ -2844,7 +2842,7 @@ TEST_P(ExternalSSTFileTest, DeltaEncodingWhileGlobalSeqnoPresent) {
 DestroyAndReopen(options);
 constexpr size_t kValueSize = 8;
 Random rnd(301);
-std::string value(RandomString(&rnd, kValueSize));
+std::string value = rnd.RandomString(kValueSize);

 // Write some key to make global seqno larger than zero
 for (int i = 0; i < 10; i++) {
@@ -2888,7 +2886,7 @@ TEST_P(ExternalSSTFileTest,
 Options options = CurrentOptions();

 Random rnd(301);
-std::string value(RandomString(&rnd, kValueSize));
+std::string value = rnd.RandomString(kValueSize);

 std::string key0 = "aa";
 std::string key1 = "ab";
@@ -22,11 +22,12 @@
 #include "rocksdb/env.h"
 #include "rocksdb/table.h"
 #include "rocksdb/write_batch.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
 #include "util/mutexlock.h"
+#include "util/random.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {

@@ -249,7 +250,8 @@ class FaultInjectionTest
 // Return the value to associate with the specified key
 Slice Value(int k, std::string* storage) const {
 Random r(k);
-return test::RandomString(&r, kValueSize, storage);
+*storage = r.RandomString(kValueSize);
+return Slice(*storage);
 }

 void CloseDB() {
@@ -3,6 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

+#include "db/flush_job.h"
+
 #include <algorithm>
 #include <array>
 #include <map>
@@ -11,7 +13,6 @@
 #include "db/blob/blob_index.h"
 #include "db/column_family.h"
 #include "db/db_impl/db_impl.h"
-#include "db/flush_job.h"
 #include "db/version_set.h"
 #include "file/writable_file_writer.h"
 #include "rocksdb/cache.h"
@@ -19,6 +20,7 @@
 #include "table/mock_table.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
 #include "util/string_util.h"

 namespace ROCKSDB_NAMESPACE {
@@ -447,7 +449,7 @@ TEST_F(FlushJobTest, Snapshots) {
 std::string key(ToString(i));
 int insertions = rnd.Uniform(max_inserts_per_keys);
 for (int j = 0; j < insertions; ++j) {
-std::string value(test::RandomHumanReadableString(&rnd, 10));
+std::string value(rnd.HumanReadableString(10));
 auto seqno = ++current_seqno;
 new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
 // a key is visible only if:
@@ -1,11 +1,13 @@
 #ifndef ROCKSDB_LITE

 #include <functional>
+
 #include "db/db_test_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/sst_file_writer.h"
 #include "test_util/testutil.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -35,14 +37,14 @@ class ImportColumnFamilyTest : public DBTestBase {
 delete metadata_ptr_;
 metadata_ptr_ = nullptr;
 }
-test::DestroyDir(env_, sst_files_dir_);
-test::DestroyDir(env_, export_files_dir_);
+DestroyDir(env_, sst_files_dir_);
+DestroyDir(env_, export_files_dir_);
 }

 void DestroyAndRecreateExternalSSTFilesDir() {
-test::DestroyDir(env_, sst_files_dir_);
+DestroyDir(env_, sst_files_dir_);
 env_->CreateDir(sst_files_dir_);
-test::DestroyDir(env_, export_files_dir_);
+DestroyDir(env_, export_files_dir_);
 }

 LiveFileMetaData LiveFileMetaDataInit(std::string name, std::string path,
@@ -411,7 +413,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {

 // Create a new db and import the files.
 DB* db_copy;
-test::DestroyDir(env_, dbname_ + "/db_copy");
+DestroyDir(env_, dbname_ + "/db_copy");
 ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy));
 ColumnFamilyHandle* cfh = nullptr;
 ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo",
@@ -427,7 +429,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {
 db_copy->DropColumnFamily(cfh);
 db_copy->DestroyColumnFamilyHandle(cfh);
 delete db_copy;
-test::DestroyDir(env_, dbname_ + "/db_copy");
+DestroyDir(env_, dbname_ + "/db_copy");
 }

 TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {
@@ -450,7 +452,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {
 snapshots.reserve(kFileBytes / kValueBytes * kNumFiles);
 for (int i = 0; i < kNumFiles; ++i) {
 for (int j = 0; j < kFileBytes / kValueBytes; ++j) {
-auto value = RandomString(&rnd, kValueBytes);
+auto value = rnd.RandomString(kValueBytes);
 ASSERT_OK(Put(1, "key", value));
 snapshots.push_back(db_->GetSnapshot());
 }
@@ -471,7 +473,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {

 // Create a new db and import the files.
 DB* db_copy;
-test::DestroyDir(env_, dbname_ + "/db_copy");
+DestroyDir(env_, dbname_ + "/db_copy");
 ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy));
 ColumnFamilyHandle* cfh = nullptr;
 ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo",
@@ -486,7 +488,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {
 db_copy->DropColumnFamily(cfh);
 db_copy->DestroyColumnFamilyHandle(cfh);
 delete db_copy;
-test::DestroyDir(env_, dbname_ + "/db_copy");
+DestroyDir(env_, dbname_ + "/db_copy");
 for (const Snapshot* snapshot : snapshots) {
 db_->ReleaseSnapshot(snapshot);
 }
@@ -35,6 +35,7 @@
 #include "util/cast_util.h"
 #include "util/hash.h"
 #include "util/mutexlock.h"
+#include "util/random.h"
 #include "util/string_util.h"
 #include "utilities/merge_operators.h"

@@ -44,10 +45,10 @@ namespace ROCKSDB_NAMESPACE {
 class PlainTableKeyDecoderTest : public testing::Test {};

 TEST_F(PlainTableKeyDecoderTest, ReadNonMmap) {
-std::string tmp;
 Random rnd(301);
 const uint32_t kLength = 2222;
-Slice contents = test::RandomString(&rnd, kLength, &tmp);
+std::string tmp = rnd.RandomString(kLength);
+Slice contents(tmp);
 test::StringSource* string_source =
 new test::StringSource(contents, 0, false);

@@ -1267,12 +1268,6 @@ static std::string Key(int i) {
 return std::string(buf);
 }

-static std::string RandomString(Random* rnd, int len) {
-std::string r;
-test::RandomString(rnd, len, &r);
-return r;
-}
-
 TEST_P(PlainTableDBTest, CompactionTrigger) {
 Options options = CurrentOptions();
 options.write_buffer_size = 120 << 10; // 120KB
@@ -1287,7 +1282,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) {
 std::vector<std::string> values;
 // Write 120KB (10 values, each 12K)
 for (int i = 0; i < 10; i++) {
-values.push_back(RandomString(&rnd, 12 << 10));
+values.push_back(rnd.RandomString(12 << 10));
 ASSERT_OK(Put(Key(i), values[i]));
 }
 ASSERT_OK(Put(Key(999), ""));
@@ -1298,7 +1293,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) {
 //generate one more file in level-0, and should trigger level-0 compaction
 std::vector<std::string> values;
 for (int i = 0; i < 12; i++) {
-values.push_back(RandomString(&rnd, 10000));
+values.push_back(rnd.RandomString(10000));
 ASSERT_OK(Put(Key(i), values[i]));
 }
 ASSERT_OK(Put(Key(999), ""));
@@ -9,6 +9,7 @@

 #ifdef GFLAGS
 #include "db_stress_tool/db_stress_common.h"
+#include "file/file_util.h"

 namespace ROCKSDB_NAMESPACE {
 class CfConsistencyStressTest : public StressTest {
@@ -307,7 +308,7 @@ class CfConsistencyStressTest : public StressTest {
 if (db_stress_env->FileExists(checkpoint_dir).ok()) {
 // If the directory might still exist, try to delete the files one by one.
 // Likely a trash file is still there.
-Status my_s = test::DestroyDir(db_stress_env, checkpoint_dir);
+Status my_s = DestroyDir(db_stress_env, checkpoint_dir);
 if (!my_s.ok()) {
 fprintf(stderr, "Fail to destory directory before checkpoint: %s",
 my_s.ToString().c_str());
@@ -26,6 +26,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/types.h>
+
 #include <algorithm>
 #include <array>
 #include <chrono>
@@ -58,9 +59,7 @@
 #include "rocksdb/utilities/transaction.h"
 #include "rocksdb/utilities/transaction_db.h"
 #include "rocksdb/write_batch.h"
-#ifndef NDEBUG
-#include "test_util/fault_injection_test_fs.h"
-#endif
+#include "test_util/testutil.h"
 #include "util/coding.h"
 #include "util/compression.h"
 #include "util/crc32c.h"
@@ -69,9 +68,6 @@
 #include "util/random.h"
 #include "util/string_util.h"
 #include "utilities/blob_db/blob_db.h"
-#include "test_util/testutil.h"
-#include "test_util/fault_injection_test_env.h"
-
 #include "utilities/merge_operators.h"

 using GFLAGS_NAMESPACE::ParseCommandLineFlags;
@@ -248,6 +244,9 @@ const int kValueMaxLen = 100;
 // wrapped posix or hdfs environment
 extern ROCKSDB_NAMESPACE::DbStressEnvWrapper* db_stress_env;
 #ifndef NDEBUG
+namespace ROCKSDB_NAMESPACE {
+class FaultInjectionTestFS;
+} // namespace ROCKSDB_NAMESPACE
 extern std::shared_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestFS> fault_fs_guard;
 #endif

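The last hunk above replaces a full include of the fault-injection filesystem with a forward declaration, which is all the shared_ptr in the header needs. A generic sketch of that idiom (the names below are placeholders, not from the tree):

// widget_registry.h -- the interface only needs the type name, so a forward
// declaration avoids pulling the heavy header into every includer.
#include <memory>

namespace example {
class ExpensiveDependency;  // forward declaration instead of #include
extern std::shared_ptr<ExpensiveDependency> dependency_guard;
}  // namespace example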
@@ -10,6 +10,7 @@

 #ifdef GFLAGS
 #include "db_stress_tool/db_stress_common.h"
+#include "utilities/fault_injection_fs.h"

 namespace ROCKSDB_NAMESPACE {
 void ThreadBody(void* v) {
@@ -15,6 +15,7 @@
 #include "rocksdb/convenience.h"
 #include "rocksdb/sst_file_manager.h"
 #include "util/cast_util.h"
+#include "utilities/fault_injection_fs.h"

 namespace ROCKSDB_NAMESPACE {
 StressTest::StressTest()
@@ -1341,7 +1342,7 @@ Status StressTest::TestCheckpoint(ThreadState* thread,
 if (db_stress_env->FileExists(checkpoint_dir).ok()) {
 // If the directory might still exist, try to delete the files one by one.
 // Likely a trash file is still there.
-Status my_s = test::DestroyDir(db_stress_env, checkpoint_dir);
+Status my_s = DestroyDir(db_stress_env, checkpoint_dir);
 if (!my_s.ok()) {
 fprintf(stderr, "Fail to destory directory before checkpoint: %s",
 my_s.ToString().c_str());
@@ -24,7 +24,7 @@
 #include "db_stress_tool/db_stress_common.h"
 #include "db_stress_tool/db_stress_driver.h"
 #ifndef NDEBUG
-#include "test_util/fault_injection_test_fs.h"
+#include "utilities/fault_injection_fs.h"
 #endif

 namespace ROCKSDB_NAMESPACE {
@@ -47,7 +47,7 @@ int db_stress_tool(int argc, char** argv) {

 #ifndef NDEBUG
 if (FLAGS_mock_direct_io) {
-test::SetupSyncPointsToMockDirectIO();
+SetupSyncPointsToMockDirectIO();
 }
 #endif
 if (FLAGS_statistics) {
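The second hunk above drops the test:: qualifier from SetupSyncPointsToMockDirectIO; the debug-only guard is unchanged. A hedged sketch of the call pattern, assuming the helper is now reachable through test_util/sync_point.h as the include lists in this diff suggest (MaybeMockDirectIO is illustrative):

#include "test_util/sync_point.h"

// Illustrative: mock O_DIRECT through sync points only in debug builds; the
// bool parameter stands in for FLAGS_mock_direct_io.
void MaybeMockDirectIO(bool mock_direct_io) {
#ifndef NDEBUG
  if (mock_direct_io) {
    ROCKSDB_NAMESPACE::SetupSyncPointsToMockDirectIO();
  }
#else
  (void)mock_direct_io;
#endif
}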
@@ -10,7 +10,7 @@
 #ifdef GFLAGS
 #include "db_stress_tool/db_stress_common.h"
 #ifndef NDEBUG
-#include "test_util/fault_injection_test_fs.h"
+#include "utilities/fault_injection_fs.h"
 #endif // NDEBUG

 namespace ROCKSDB_NAMESPACE {
env/env_test.cc
@@ -35,14 +35,15 @@
 #include "port/malloc.h"
 #include "port/port.h"
 #include "rocksdb/env.h"
-#include "test_util/fault_injection_test_env.h"
-#include "test_util/fault_injection_test_fs.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
 #include "util/coding.h"
 #include "util/mutexlock.h"
+#include "util/random.h"
 #include "util/string_util.h"
+#include "utilities/fault_injection_env.h"
+#include "utilities/fault_injection_fs.h"

 namespace ROCKSDB_NAMESPACE {

@@ -287,7 +288,7 @@ TEST_F(EnvPosixTest, MemoryMappedFileBuffer) {
 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));

 Random rnd(301);
-test::RandomString(&rnd, kFileBytes, &expected_data);
+expected_data = rnd.RandomString(kFileBytes);
 ASSERT_OK(wfile->Append(expected_data));
 }

@@ -1264,9 +1265,8 @@ TEST_F(EnvPosixTest, MultiReadNonAlignedLargeNum) {
 std::string fname = test::PerThreadDBPath(env_, "testfile");

 const size_t kTotalSize = 81920;
-std::string expected_data;
 Random rnd(301);
-test::RandomString(&rnd, kTotalSize, &expected_data);
+std::string expected_data = rnd.RandomString(kTotalSize);

 // Create file.
 {
@@ -1949,7 +1949,7 @@ TEST_P(EnvPosixTestWithParam, PosixRandomRWFileRandomized) {
 std::string buf;
 for (int i = 0; i < 10000; i++) {
 // Genrate random data
-test::RandomString(&rnd, 10, &buf);
+buf = rnd.RandomString(10);

 // Pick random offset for write
 size_t write_off = rnd.Next() % 1000;
@@ -3,18 +3,20 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

+#include "file/delete_scheduler.h"
+
 #include <atomic>
 #include <cinttypes>
 #include <thread>
 #include <vector>

-#include "file/delete_scheduler.h"
+#include "env/composite_env_wrapper.h"
+#include "file/file_util.h"
 #include "file/sst_file_manager_impl.h"
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
-#include "test_util/testutil.h"
 #include "util/string_util.h"

 #ifndef ROCKSDB_LITE
@@ -40,12 +42,12 @@ class DeleteSchedulerTest : public testing::Test {
 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({});
 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
 for (const auto& dummy_files_dir : dummy_files_dirs_) {
-test::DestroyDir(env_, dummy_files_dir);
+DestroyDir(env_, dummy_files_dir);
 }
 }

 void DestroyAndCreateDir(const std::string& dir) {
-ASSERT_OK(test::DestroyDir(env_, dir));
+ASSERT_OK(DestroyDir(env_, dir));
 EXPECT_OK(env_->CreateDir(dir));
 }

@@ -187,4 +187,49 @@ IOStatus GenerateOneFileChecksum(FileSystem* fs, const std::string& file_path,
 return IOStatus::OK();
 }

+Status DestroyDir(Env* env, const std::string& dir) {
+Status s;
+if (env->FileExists(dir).IsNotFound()) {
+return s;
+}
+std::vector<std::string> files_in_dir;
+s = env->GetChildren(dir, &files_in_dir);
+if (s.ok()) {
+for (auto& file_in_dir : files_in_dir) {
+if (file_in_dir == "." || file_in_dir == "..") {
+continue;
+}
+std::string path = dir + "/" + file_in_dir;
+bool is_dir = false;
+s = env->IsDirectory(path, &is_dir);
+if (s.ok()) {
+if (is_dir) {
+s = DestroyDir(env, path);
+} else {
+s = env->DeleteFile(path);
+}
+}
+if (!s.ok()) {
+// IsDirectory, etc. might not report NotFound
+if (s.IsNotFound() || env->FileExists(path).IsNotFound()) {
+// Allow files to be deleted externally
+s = Status::OK();
+} else {
+break;
+}
+}
+}
+}
+
+if (s.ok()) {
+s = env->DeleteDir(dir);
+// DeleteDir might or might not report NotFound
+if (!s.ok() && (s.IsNotFound() || env->FileExists(dir).IsNotFound())) {
+// Allow to be deleted externally
+s = Status::OK();
+}
+}
+return s;
+}
+
 } // namespace ROCKSDB_NAMESPACE
@@ -53,4 +53,7 @@ inline IOStatus PrepareIOFromReadOptions(const ReadOptions& ro, Env* env,
 return IOStatus::OK();
 }

+// Test method to delete the input directory and all of its contents.
+// This method is destructive and is meant for use only in tests!!!
+Status DestroyDir(Env* env, const std::string& dir);
 } // namespace ROCKSDB_NAMESPACE
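With this declaration in file/file_util.h, test fixtures call DestroyDir directly instead of test::DestroyDir, as the TearDown hunks elsewhere in this diff do. A minimal usage sketch (ResetScratchDir is illustrative, not from the tree):

#include <string>

#include "file/file_util.h"
#include "rocksdb/env.h"

// Illustrative: wipe and recreate a scratch directory for a test. DestroyDir
// returns OK when the directory does not exist, so this is safe on first use.
ROCKSDB_NAMESPACE::Status ResetScratchDir(ROCKSDB_NAMESPACE::Env* env,
                                          const std::string& dir) {
  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DestroyDir(env, dir);
  if (s.ok()) {
    s = env->CreateDir(dir);
  }
  return s;
}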
@@ -3,19 +3,22 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

+#include "file/random_access_file_reader.h"
+
+#include "file/file_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/file_system.h"
-#include "file/random_access_file_reader.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

 class RandomAccessFileReaderTest : public testing::Test {
 public:
 void SetUp() override {
-test::SetupSyncPointsToMockDirectIO();
+SetupSyncPointsToMockDirectIO();
 env_ = Env::Default();
 fs_ = FileSystem::Default();
 test_dir_ = test::PerThreadDBPath("random_access_file_reader_test");
@@ -23,9 +26,7 @@ class RandomAccessFileReaderTest : public testing::Test {
 ComputeAndSetAlignment();
 }

-void TearDown() override {
-EXPECT_OK(test::DestroyDir(env_, test_dir_));
-}
+void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); }

 void Write(const std::string& fname, const std::string& content) {
 std::unique_ptr<FSWritableFile> f;
@@ -79,8 +80,7 @@ class RandomAccessFileReaderTest : public testing::Test {
 TEST_F(RandomAccessFileReaderTest, ReadDirectIO) {
 std::string fname = "read-direct-io";
 Random rand(0);
-std::string content;
-test::RandomString(&rand, static_cast<int>(alignment()), &content);
+std::string content = rand.RandomString(static_cast<int>(alignment()));
 Write(fname, content);

 FileOptions opts;
@@ -104,8 +104,7 @@ TEST_F(RandomAccessFileReaderTest, MultiReadDirectIO) {
 // Creates a file with 3 pages.
 std::string fname = "multi-read-direct-io";
 Random rand(0);
-std::string content;
-test::RandomString(&rand, 3 * static_cast<int>(alignment()), &content);
+std::string content = rand.RandomString(3 * static_cast<int>(alignment()));
 Write(fname, content);

 FileOptions opts;
@@ -141,7 +141,7 @@ class RandomGenerator {
 RandomGenerator() {
 Random rnd(301);
 auto size = (unsigned)std::max(1048576, FLAGS_item_size);
-test::RandomString(&rnd, size, &data_);
+data_ = rnd.RandomString(size);
 pos_ = 0;
 }

src.mk
@@ -212,6 +212,8 @@ LIB_SOURCES = \
 utilities/debug.cc \
 utilities/env_mirror.cc \
 utilities/env_timed.cc \
+utilities/fault_injection_env.cc \
+utilities/fault_injection_fs.cc \
 utilities/leveldb_options/leveldb_options.cc \
 utilities/memory/memory_util.cc \
 utilities/merge_operators/max.cc \
@@ -277,8 +279,6 @@ ANALYZER_LIB_SOURCES = \

 MOCK_LIB_SOURCES = \
 table/mock_table.cc \
-test_util/fault_injection_test_fs.cc \
-test_util/fault_injection_test_env.cc

 BENCH_LIB_SOURCES = \
 tools/db_bench_tool.cc \
@@ -4,18 +4,20 @@
 // (found in the LICENSE.Apache file in the root directory).

 #include "table/block_based/block_based_table_reader.h"
-#include "rocksdb/file_system.h"
-#include "table/block_based/partitioned_index_iterator.h"

 #include "db/table_properties_collector.h"
+#include "file/file_util.h"
 #include "options/options_helper.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
+#include "rocksdb/file_system.h"
 #include "table/block_based/block_based_table_builder.h"
 #include "table/block_based/block_based_table_factory.h"
+#include "table/block_based/partitioned_index_iterator.h"
 #include "table/format.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -33,7 +35,7 @@ class BlockBasedTableReaderTest
 std::tie(compression_type_, use_direct_reads_, index_type, no_block_cache) =
 GetParam();

-test::SetupSyncPointsToMockDirectIO();
+SetupSyncPointsToMockDirectIO();
 test_dir_ = test::PerThreadDBPath("block_based_table_reader_test");
 env_ = Env::Default();
 fs_ = FileSystem::Default();
@@ -46,7 +48,7 @@ class BlockBasedTableReaderTest
 static_cast<BlockBasedTableFactory*>(NewBlockBasedTableFactory(opts)));
 }

-void TearDown() override { EXPECT_OK(test::DestroyDir(env_, test_dir_)); }
+void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); }

 // Creates a table with the specificied key value pairs (kv).
 void CreateTable(const std::string& table_name,
@@ -159,9 +161,9 @@ TEST_P(BlockBasedTableReaderTest, MultiGet) {
 sprintf(k, "%08u", key);
 std::string v;
 if (block % 2) {
-v = test::RandomHumanReadableString(&rnd, 256);
+v = rnd.HumanReadableString(256);
 } else {
-test::RandomString(&rnd, 256, &v);
+v = rnd.RandomString(256);
 }
 kv[std::string(k)] = v;
 key++;
@@ -256,8 +258,7 @@ TEST_P(BlockBasedTableReaderTestVerifyChecksum, ChecksumMismatch) {
 // and internal key size is required to be >= 8 bytes,
 // so use %08u as the format string.
 sprintf(k, "%08u", key);
-std::string v;
-test::RandomString(&rnd, 256, &v);
+std::string v = rnd.RandomString(256);
 kv[std::string(k)] = v;
 key++;
 }
@@ -29,12 +29,6 @@

 namespace ROCKSDB_NAMESPACE {

-static std::string RandomString(Random *rnd, int len) {
-std::string r;
-test::RandomString(rnd, len, &r);
-return r;
-}
-
 std::string GenerateInternalKey(int primary_key, int secondary_key,
 int padding_size, Random *rnd) {
 char buf[50];
@@ -42,7 +36,7 @@ std::string GenerateInternalKey(int primary_key, int secondary_key,
 snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key);
 std::string k(p);
 if (padding_size) {
-k += RandomString(rnd, padding_size);
+k += rnd->RandomString(padding_size);
 }
 AppendInternalKeyFooter(&k, 0 /* seqno */, kTypeValue);

@@ -67,7 +61,7 @@ void GenerateRandomKVs(std::vector<std::string> *keys,
 keys->emplace_back(GenerateInternalKey(i, j, padding_size, &rnd));

 // 100 bytes values
-values->emplace_back(RandomString(&rnd, 100));
+values->emplace_back(rnd.RandomString(100));
 }
 }
 }
@@ -3,6 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

+#include "table/block_based/data_block_hash_index.h"
+
 #include <cstdlib>
 #include <string>
 #include <unordered_map>
@@ -12,11 +14,11 @@
 #include "table/block_based/block.h"
 #include "table/block_based/block_based_table_reader.h"
 #include "table/block_based/block_builder.h"
-#include "table/block_based/data_block_hash_index.h"
 #include "table/get_context.h"
 #include "table/table_builder.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -35,12 +37,6 @@ bool SearchForOffset(DataBlockHashIndex& index, const char* data,
 return entry == restart_point;
 }

-// Random KV generator similer to block_test
-static std::string RandomString(Random* rnd, int len) {
-std::string r;
-test::RandomString(rnd, len, &r);
-return r;
-}
 std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
 Random* rnd) {
 char buf[50];
@@ -48,7 +44,7 @@ std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
 snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key);
 std::string k(p);
 if (padding_size) {
-k += RandomString(rnd, padding_size);
+k += rnd->RandomString(padding_size);
 }

 return k;
@@ -71,7 +67,7 @@ void GenerateRandomKVs(std::vector<std::string>* keys,
 keys->emplace_back(GenerateKey(i, j, padding_size, &rnd));

 // 100 bytes values
-values->emplace_back(RandomString(&rnd, 100));
+values->emplace_back(rnd.RandomString(100));
 }
 }
 }
@@ -4,7 +4,10 @@
 // (found in the LICENSE.Apache file in the root directory).

 #include "table/block_fetcher.h"
+
 #include "db/table_properties_collector.h"
+#include "env/composite_env_wrapper.h"
+#include "file/file_util.h"
 #include "options/options_helper.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
@@ -14,7 +17,6 @@
 #include "table/block_based/block_based_table_reader.h"
 #include "table/format.h"
 #include "test_util/testharness.h"
-#include "test_util/testutil.h"

 namespace ROCKSDB_NAMESPACE {
 namespace {
@@ -71,14 +73,14 @@ class BlockFetcherTest : public testing::Test {

 protected:
 void SetUp() override {
-test::SetupSyncPointsToMockDirectIO();
+SetupSyncPointsToMockDirectIO();
 test_dir_ = test::PerThreadDBPath("block_fetcher_test");
 env_ = Env::Default();
 fs_ = FileSystem::Default();
 ASSERT_OK(fs_->CreateDir(test_dir_, IOOptions(), nullptr));
 }

-void TearDown() override { EXPECT_OK(test::DestroyDir(env_, test_dir_)); }
+void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); }

 void AssertSameBlock(const std::string& block1, const std::string& block2) {
 ASSERT_EQ(block1, block2);
@@ -3,12 +3,13 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

-#include <vector>
 #include <string>
+#include <vector>

 #include "table/merging_iterator.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -24,7 +25,7 @@ class MergerTest : public testing::Test {
 std::vector<std::string> ret;

 for (size_t i = 0; i < len; ++i) {
-InternalKey ik(test::RandomHumanReadableString(&rnd_, string_len), 0,
+InternalKey ik(rnd_.HumanReadableString(string_len), 0,
 ValueType::kTypeValue);
 ret.push_back(ik.Encode().ToString(false));
 }
@@ -44,8 +45,7 @@ class MergerTest : public testing::Test {
 }

 void SeekToRandom() {
-InternalKey ik(test::RandomHumanReadableString(&rnd_, 5), 0,
-ValueType::kTypeValue);
+InternalKey ik(rnd_.HumanReadableString(5), 0, ValueType::kTypeValue);
 Seek(ik.Encode().ToString(false));
 }

@@ -1227,8 +1227,7 @@ class FileChecksumTestHelper {
  void AddKVtoKVMap(int num_entries) {
    Random rnd(test::RandomSeed());
    for (int i = 0; i < num_entries; i++) {
-      std::string v;
-      test::RandomString(&rnd, 100, &v);
+      std::string v = rnd.RandomString(100);
      kv_map_[test::RandomKey(&rnd, 20)] = v;
    }
  }
@@ -1899,16 +1898,10 @@ TEST_P(BlockBasedTableTest, SkipPrefixBloomFilter) {
  }
 }

-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-
 void AddInternalKey(TableConstructor* c, const std::string& prefix,
                    std::string value = "v", int /*suffix_len*/ = 800) {
  static Random rnd(1023);
-  InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
+  InternalKey k(prefix + rnd.RandomString(800), 0, kTypeValue);
  c->Add(k.Encode().ToString(), value);
 }

@@ -2481,7 +2474,7 @@ TEST_P(BlockBasedTableTest, IndexSizeStat) {
  std::vector<std::string> keys;

  for (int i = 0; i < 100; ++i) {
-    keys.push_back(RandomString(&rnd, 10000));
+    keys.push_back(rnd.RandomString(10000));
  }

  // Each time we load one more key to the table. the table index block
@@ -2525,7 +2518,7 @@ TEST_P(BlockBasedTableTest, NumBlockStat) {
  for (int i = 0; i < 10; ++i) {
    // the key/val are slightly smaller than block size, so that each block
    // holds roughly one key/value pair.
-    c.Add(RandomString(&rnd, 900), "val");
+    c.Add(rnd.RandomString(900), "val");
  }

  std::vector<std::string> ks;
@@ -3607,9 +3600,8 @@ TEST_P(ParameterizedHarnessTest, RandomizedHarnessTest) {
  for (int num_entries = 0; num_entries < 2000;
       num_entries += (num_entries < 50 ? 1 : 200)) {
    for (int e = 0; e < num_entries; e++) {
-      std::string v;
      Add(test::RandomKey(&rnd, rnd.Skewed(4)),
-          test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
+          rnd.RandomString(rnd.Skewed(5)));
    }
    Test(&rnd);
  }
@@ -3621,8 +3613,7 @@ TEST_F(DBHarnessTest, RandomizedLongDB) {
  int num_entries = 100000;
  for (int e = 0; e < num_entries; e++) {
    std::string v;
-    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
-        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
+    Add(test::RandomKey(&rnd, rnd.Skewed(4)), rnd.RandomString(rnd.Skewed(5)));
  }
  Test(&rnd);

@@ -3878,8 +3869,8 @@ TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) {
  TableConstructor c(BytewiseComparator());
  static Random rnd(301);
  for (int i = 0; i < kKeysInTable; i++) {
-    InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue);
-    c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize));
+    InternalKey k(rnd.RandomString(kKeySize), 0, kTypeValue);
+    c.Add(k.Encode().ToString(), rnd.RandomString(kValSize));
  }

  std::vector<std::string> keys;
@@ -4541,9 +4532,9 @@ TEST_P(BlockBasedTableTest, DataBlockHashIndex) {
  static Random rnd(1048);
  for (int i = 0; i < kNumKeys; i++) {
    // padding one "0" to mark existent keys.
-    std::string random_key(RandomString(&rnd, kKeySize - 1) + "1");
+    std::string random_key(rnd.RandomString(kKeySize - 1) + "1");
    InternalKey k(random_key, 0, kTypeValue);
-    c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize));
+    c.Add(k.Encode().ToString(), rnd.RandomString(kValSize));
  }

  std::vector<std::string> keys;

@@ -4,6 +4,10 @@
 // (found in the LICENSE.Apache file in the root directory).

 #include "test_util/sync_point.h"
+
+#include <fcntl.h>
+#include <sys/stat.h>
+
 #include "test_util/sync_point_impl.h"

 int rocksdb_kill_odds = 0;
@@ -64,3 +68,22 @@ void SyncPoint::Process(const std::string& point, void* cb_arg) {

 }  // namespace ROCKSDB_NAMESPACE
 #endif  // NDEBUG
+
+namespace ROCKSDB_NAMESPACE {
+void SetupSyncPointsToMockDirectIO() {
+#if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \
+    !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD)
+  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+      "NewWritableFile:O_DIRECT", [&](void* arg) {
+        int* val = static_cast<int*>(arg);
+        *val &= ~O_DIRECT;
+      });
+  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+      "NewRandomAccessFile:O_DIRECT", [&](void* arg) {
+        int* val = static_cast<int*>(arg);
+        *val &= ~O_DIRECT;
+      });
+  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
+#endif
+}
+}  // namespace ROCKSDB_NAMESPACE

@@ -124,6 +124,9 @@ class SyncPoint {
  Data* impl_;
 };

+// Sets up sync points to mock direct IO instead of actually issuing direct IO
+// to the file system.
+void SetupSyncPointsToMockDirectIO();
 }  // namespace ROCKSDB_NAMESPACE

 // Use TEST_SYNC_POINT to specify sync points inside code base.

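With the helper now declared in test_util/sync_point.h, a test fixture calls it directly instead of going through the test:: namespace. A minimal sketch of the intended usage (the fixture and directory names below are illustrative, not part of this change):

    #include <string>

    #include "test_util/sync_point.h"
    #include "test_util/testharness.h"

    namespace ROCKSDB_NAMESPACE {
    class MockDirectIOTest : public testing::Test {
     protected:
      void SetUp() override {
        // Strips O_DIRECT from NewWritableFile / NewRandomAccessFile via sync
        // points, so direct-IO tests also run on filesystems that do not
        // support it (a no-op in release builds and on unsupported platforms).
        SetupSyncPointsToMockDirectIO();
        test_dir_ = test::PerThreadDBPath("mock_direct_io_test");
      }
      std::string test_dir_;
    };
    }  // namespace ROCKSDB_NAMESPACE
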
@@ -11,6 +11,7 @@

 #include <fcntl.h>
 #include <sys/stat.h>
+
 #include <array>
 #include <cctype>
 #include <fstream>
@@ -23,6 +24,7 @@
 #include "file/writable_file_writer.h"
 #include "port/port.h"
 #include "test_util/sync_point.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {
 namespace test {
@@ -30,23 +32,6 @@ namespace test {
 const uint32_t kDefaultFormatVersion = BlockBasedTableOptions().format_version;
 const uint32_t kLatestFormatVersion = 5u;

-Slice RandomString(Random* rnd, int len, std::string* dst) {
-  dst->resize(len);
-  for (int i = 0; i < len; i++) {
-    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));  // ' ' .. '~'
-  }
-  return Slice(*dst);
-}
-
-extern std::string RandomHumanReadableString(Random* rnd, int len) {
-  std::string ret;
-  ret.resize(len);
-  for (int i = 0; i < len; ++i) {
-    ret[i] = static_cast<char>('a' + rnd->Uniform(26));
-  }
-  return ret;
-}
-
 std::string RandomKey(Random* rnd, int len, RandomKeyType type) {
  // Make sure to generate a wide variety of characters so we
  // test the boundary conditions for short-key optimizations.
@@ -78,8 +63,7 @@ extern Slice CompressibleString(Random* rnd, double compressed_fraction,
                                int len, std::string* dst) {
  int raw = static_cast<int>(len * compressed_fraction);
  if (raw < 1) raw = 1;
-  std::string raw_data;
-  RandomString(rnd, raw, &raw_data);
+  std::string raw_data = rnd->RandomString(raw);

  // Duplicate the random data until we have filled "len" bytes
  dst->clear();
@@ -453,51 +437,6 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options,
                           &cf_opt->compression_per_level, rnd);
 }

-Status DestroyDir(Env* env, const std::string& dir) {
-  Status s;
-  if (env->FileExists(dir).IsNotFound()) {
-    return s;
-  }
-  std::vector<std::string> files_in_dir;
-  s = env->GetChildren(dir, &files_in_dir);
-  if (s.ok()) {
-    for (auto& file_in_dir : files_in_dir) {
-      if (file_in_dir == "." || file_in_dir == "..") {
-        continue;
-      }
-      std::string path = dir + "/" + file_in_dir;
-      bool is_dir = false;
-      s = env->IsDirectory(path, &is_dir);
-      if (s.ok()) {
-        if (is_dir) {
-          s = DestroyDir(env, path);
-        } else {
-          s = env->DeleteFile(path);
-        }
-      }
-      if (!s.ok()) {
-        // IsDirectory, etc. might not report NotFound
-        if (s.IsNotFound() || env->FileExists(path).IsNotFound()) {
-          // Allow files to be deleted externally
-          s = Status::OK();
-        } else {
-          break;
-        }
-      }
-    }
-  }
-
-  if (s.ok()) {
-    s = env->DeleteDir(dir);
-    // DeleteDir might or might not report NotFound
-    if (!s.ok() && (s.IsNotFound() || env->FileExists(dir).IsNotFound())) {
-      // Allow to be deleted externally
-      s = Status::OK();
-    }
-  }
-  return s;
-}
-
 bool IsDirectIOSupported(Env* env, const std::string& dir) {
  EnvOptions env_options;
  env_options.use_mmap_writes = false;
@@ -531,22 +470,6 @@ size_t GetLinesCount(const std::string& fname, const std::string& pattern) {
  return count;
 }

-void SetupSyncPointsToMockDirectIO() {
-#if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \
-    !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD)
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
-      "NewWritableFile:O_DIRECT", [&](void* arg) {
-        int* val = static_cast<int*>(arg);
-        *val &= ~O_DIRECT;
-      });
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
-      "NewRandomAccessFile:O_DIRECT", [&](void* arg) {
-        int* val = static_cast<int*>(arg);
-        *val &= ~O_DIRECT;
-      });
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
-#endif
-}
-
 void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt) {
  struct stat sbuf;

@@ -26,9 +26,9 @@
 #include "table/internal_iterator.h"
 #include "table/plain/plain_table_factory.h"
 #include "util/mutexlock.h"
-#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {
+class Random;
 class SequentialFile;
 class SequentialFileReader;

@@ -37,12 +37,6 @@ namespace test {
 extern const uint32_t kDefaultFormatVersion;
 extern const uint32_t kLatestFormatVersion;

-// Store in *dst a random string of length "len" and return a Slice that
-// references the generated data.
-extern Slice RandomString(Random* rnd, int len, std::string* dst);
-
-extern std::string RandomHumanReadableString(Random* rnd, int len);
-
 // Return a random key with the specified length that may contain interesting
 // characters (e.g. \x00, \xff, etc.).
 enum RandomKeyType : char { RANDOM, LARGEST, SMALLEST, MIDDLE };
@@ -796,8 +790,6 @@ TableFactory* RandomTableFactory(Random* rnd, int pre_defined = -1);

 std::string RandomName(Random* rnd, const size_t len);

-Status DestroyDir(Env* env, const std::string& dir);
-
 bool IsDirectIOSupported(Env* env, const std::string& dir);

 // Return the number of lines where a given pattern was found in a file.
@@ -808,9 +800,6 @@ size_t GetLinesCount(const std::string& fname, const std::string& pattern);
 // Tries to set TEST_TMPDIR to a directory supporting direct IO.
 void ResetTmpDirForDirectIO();

-// Sets up sync points to mock direct IO instead of actually issuing direct IO
-// to the file system.
-void SetupSyncPointsToMockDirectIO();

 void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt);

@@ -5,6 +5,7 @@

 #include "test_util/testutil.h"

+#include "file/file_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "test_util/testharness.h"
@@ -28,7 +29,7 @@ TEST(TestUtil, DestroyDirRecursively) {
  ASSERT_OK(env->CreateDir(test_dir + "/dir"));
  CreateFile(env, test_dir + "/dir/file");

-  ASSERT_OK(test::DestroyDir(env, test_dir));
+  ASSERT_OK(DestroyDir(env, test_dir));
  auto s = env->FileExists(test_dir);
  ASSERT_TRUE(s.IsNotFound());
 }

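Since DestroyDir now lives in file/file_util.h, non-test code can remove a directory tree without linking the test utilities. A small caller sketch, assuming only the relocated declaration exercised above (CleanupScratch is a hypothetical helper name):

    #include <string>

    #include "file/file_util.h"
    #include "rocksdb/env.h"

    namespace ROCKSDB_NAMESPACE {
    // Recursively deletes scratch_dir; files or directories that disappear
    // underneath the call (deleted externally) are tolerated, as in the
    // implementation moved out of test_util above.
    Status CleanupScratch(Env* env, const std::string& scratch_dir) {
      return DestroyDir(env, scratch_dir);
    }
    }  // namespace ROCKSDB_NAMESPACE
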
@@ -37,20 +37,14 @@ struct DataPumpThread {
  DB* db;  // Assumption DB is Open'ed already.
 };

-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-
 static void DataPumpThreadBody(void* arg) {
  DataPumpThread* t = reinterpret_cast<DataPumpThread*>(arg);
  DB* db = t->db;
  Random rnd(301);
  size_t i = 0;
  while (i++ < t->no_records) {
-    if (!db->Put(WriteOptions(), Slice(RandomString(&rnd, 500)),
-                 Slice(RandomString(&rnd, 500)))
+    if (!db->Put(WriteOptions(), Slice(rnd.RandomString(500)),
+                 Slice(rnd.RandomString(500)))
             .ok()) {
      fprintf(stderr, "Error in put\n");
      exit(1);

@@ -6,6 +6,7 @@
 #ifndef ROCKSDB_LITE

 #include "rocksdb/utilities/ldb_cmd.h"
+
 #include "db/version_edit.h"
 #include "db/version_set.h"
 #include "env/composite_env_wrapper.h"
@@ -16,6 +17,7 @@
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
 #include "util/file_checksum_helper.h"
+#include "util/random.h"

 using std::string;
 using std::vector;
@@ -284,32 +286,28 @@ TEST_F(LdbCmdTest, DumpFileChecksumNoChecksum) {
  for (int i = 0; i < 200; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 100; i < 300; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 200; i < 400; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 300; i < 400; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
@@ -369,32 +367,28 @@ TEST_F(LdbCmdTest, DumpFileChecksumCRC32) {
  for (int i = 0; i < 100; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 50; i < 150; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 100; i < 200; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));
  for (int i = 150; i < 250; i++) {
    char buf[16];
    snprintf(buf, sizeof(buf), "%08d", i);
-    std::string v;
-    test::RandomString(&rnd, 100, &v);
+    std::string v = rnd.RandomString(100);
    ASSERT_OK(db->Put(wopts, buf, v));
  }
  ASSERT_OK(db->Flush(fopts));

@@ -166,8 +166,7 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) {
    std::string target;
    for (int i = 0; i < 20; i++) {
      uint32_t num = r.Skewed(16) * 100 + r.Uniform(100);
-      std::string random_string;
-      test::RandomString(&r, num, &random_string);
+      std::string random_string = r.RandomString(num);
      writer->Append(Slice(random_string.c_str(), num));
      target.append(random_string.c_str(), num);

@@ -288,8 +287,7 @@ TEST_P(ReadaheadRandomAccessFileTest, SourceStrLenGreaterThanReadaheadSize) {
  for (int k = 0; k < 100; ++k) {
    size_t strLen = k * GetReadaheadSize() +
                    rng.Uniform(static_cast<int>(GetReadaheadSize()));
-    std::string str =
-        test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
+    std::string str = rng.HumanReadableString(static_cast<int>(strLen));
    ResetSourceStr(str);
    for (int test = 1; test <= 100; ++test) {
      size_t offset = rng.Uniform(static_cast<int>(strLen));
@@ -304,8 +302,7 @@ TEST_P(ReadaheadRandomAccessFileTest, ReadExceedsReadaheadSize) {
  Random rng(7);
  size_t strLen = 4 * GetReadaheadSize() +
                  rng.Uniform(static_cast<int>(GetReadaheadSize()));
-  std::string str =
-      test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
+  std::string str = rng.HumanReadableString(static_cast<int>(strLen));
  ResetSourceStr(str);
  for (int test = 1; test <= 100; ++test) {
    size_t offset = rng.Uniform(static_cast<int>(strLen));
@@ -383,8 +380,7 @@ TEST_P(ReadaheadSequentialFileTest, SourceStrLenGreaterThanReadaheadSize) {
  for (int k = 0; k < 100; ++k) {
    size_t strLen = k * GetReadaheadSize() +
                    rng.Uniform(static_cast<int>(GetReadaheadSize()));
-    std::string str =
-        test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
+    std::string str = rng.HumanReadableString(static_cast<int>(strLen));
    ResetSourceStr(str);
    size_t offset = 0;
    for (int test = 1; test <= 100; ++test) {
@@ -406,8 +402,7 @@ TEST_P(ReadaheadSequentialFileTest, ReadExceedsReadaheadSize) {
  for (int k = 0; k < 100; ++k) {
    size_t strLen = k * GetReadaheadSize() +
                    rng.Uniform(static_cast<int>(GetReadaheadSize()));
-    std::string str =
-        test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
+    std::string str = rng.HumanReadableString(static_cast<int>(strLen));
    ResetSourceStr(str);
    size_t offset = 0;
    for (int test = 1; test <= 100; ++test) {

@@ -35,4 +35,22 @@ Random* Random::GetTLSInstance() {
  return rv;
 }

+std::string Random::HumanReadableString(int len) {
+  std::string ret;
+  ret.resize(len);
+  for (int i = 0; i < len; ++i) {
+    ret[i] = static_cast<char>('a' + Uniform(26));
+  }
+  return ret;
+}
+
+std::string Random::RandomString(int len) {
+  std::string ret;
+  ret.resize(len);
+  for (int i = 0; i < len; i++) {
+    ret[i] = static_cast<char>(' ' + Uniform(95));  // ' ' .. '~'
+  }
+  return ret;
+}
+
 }  // namespace ROCKSDB_NAMESPACE

@@ -86,6 +86,12 @@ class Random {
    return Uniform(1 << Uniform(max_log + 1));
  }

+  // Returns a random string of length "len"
+  std::string RandomString(int len);
+
+  // Generates a random string of len bytes using human-readable characters
+  std::string HumanReadableString(int len);
+
  // Returns a Random instance for use by the current thread without
  // additional locking
  static Random* GetTLSInstance();

|
|||||||
}
|
}
|
||||||
|
|
||||||
for (uint64_t i = 0; i < bytes_to_corrupt; ++i) {
|
for (uint64_t i = 0; i < bytes_to_corrupt; ++i) {
|
||||||
std::string tmp;
|
std::string tmp = rnd_.RandomString(1);
|
||||||
test::RandomString(&rnd_, 1, &tmp);
|
|
||||||
file_contents[rnd_.Next() % file_contents.size()] = tmp[0];
|
file_contents[rnd_.Next() % file_contents.size()] = tmp[0];
|
||||||
}
|
}
|
||||||
return WriteToFile(fname, file_contents);
|
return WriteToFile(fname, file_contents);
|
||||||
|
@ -5,6 +5,8 @@
|
|||||||
|
|
||||||
#ifndef ROCKSDB_LITE
|
#ifndef ROCKSDB_LITE
|
||||||
|
|
||||||
|
#include "utilities/blob_db/blob_db.h"
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
@ -22,14 +24,13 @@
|
|||||||
#include "file/sst_file_manager_impl.h"
|
#include "file/sst_file_manager_impl.h"
|
||||||
#include "port/port.h"
|
#include "port/port.h"
|
||||||
#include "rocksdb/utilities/debug.h"
|
#include "rocksdb/utilities/debug.h"
|
||||||
#include "test_util/fault_injection_test_env.h"
|
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
#include "test_util/testharness.h"
|
#include "test_util/testharness.h"
|
||||||
#include "util/cast_util.h"
|
#include "util/cast_util.h"
|
||||||
#include "util/random.h"
|
#include "util/random.h"
|
||||||
#include "util/string_util.h"
|
#include "util/string_util.h"
|
||||||
#include "utilities/blob_db/blob_db.h"
|
|
||||||
#include "utilities/blob_db/blob_db_impl.h"
|
#include "utilities/blob_db/blob_db_impl.h"
|
||||||
|
#include "utilities/fault_injection_env.h"
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
namespace blob_db {
|
namespace blob_db {
|
||||||
@ -142,7 +143,7 @@ class BlobDBTest : public testing::Test {
|
|||||||
void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd,
|
void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd,
|
||||||
std::map<std::string, std::string> *data = nullptr) {
|
std::map<std::string, std::string> *data = nullptr) {
|
||||||
int len = rnd->Next() % kMaxBlobSize + 1;
|
int len = rnd->Next() % kMaxBlobSize + 1;
|
||||||
std::string value = test::RandomHumanReadableString(rnd, len);
|
std::string value = rnd->HumanReadableString(len);
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
blob_db_->PutWithTTL(WriteOptions(), Slice(key), Slice(value), ttl));
|
blob_db_->PutWithTTL(WriteOptions(), Slice(key), Slice(value), ttl));
|
||||||
if (data != nullptr) {
|
if (data != nullptr) {
|
||||||
@ -153,7 +154,7 @@ class BlobDBTest : public testing::Test {
|
|||||||
void PutRandomUntil(const std::string &key, uint64_t expiration, Random *rnd,
|
void PutRandomUntil(const std::string &key, uint64_t expiration, Random *rnd,
|
||||||
std::map<std::string, std::string> *data = nullptr) {
|
std::map<std::string, std::string> *data = nullptr) {
|
||||||
int len = rnd->Next() % kMaxBlobSize + 1;
|
int len = rnd->Next() % kMaxBlobSize + 1;
|
||||||
std::string value = test::RandomHumanReadableString(rnd, len);
|
std::string value = rnd->HumanReadableString(len);
|
||||||
ASSERT_OK(blob_db_->PutUntil(WriteOptions(), Slice(key), Slice(value),
|
ASSERT_OK(blob_db_->PutUntil(WriteOptions(), Slice(key), Slice(value),
|
||||||
expiration));
|
expiration));
|
||||||
if (data != nullptr) {
|
if (data != nullptr) {
|
||||||
@ -169,7 +170,7 @@ class BlobDBTest : public testing::Test {
|
|||||||
void PutRandom(DB *db, const std::string &key, Random *rnd,
|
void PutRandom(DB *db, const std::string &key, Random *rnd,
|
||||||
std::map<std::string, std::string> *data = nullptr) {
|
std::map<std::string, std::string> *data = nullptr) {
|
||||||
int len = rnd->Next() % kMaxBlobSize + 1;
|
int len = rnd->Next() % kMaxBlobSize + 1;
|
||||||
std::string value = test::RandomHumanReadableString(rnd, len);
|
std::string value = rnd->HumanReadableString(len);
|
||||||
ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
|
ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
|
||||||
if (data != nullptr) {
|
if (data != nullptr) {
|
||||||
(*data)[key] = value;
|
(*data)[key] = value;
|
||||||
@ -180,7 +181,7 @@ class BlobDBTest : public testing::Test {
|
|||||||
const std::string &key, Random *rnd, WriteBatch *batch,
|
const std::string &key, Random *rnd, WriteBatch *batch,
|
||||||
std::map<std::string, std::string> *data = nullptr) {
|
std::map<std::string, std::string> *data = nullptr) {
|
||||||
int len = rnd->Next() % kMaxBlobSize + 1;
|
int len = rnd->Next() % kMaxBlobSize + 1;
|
||||||
std::string value = test::RandomHumanReadableString(rnd, len);
|
std::string value = rnd->HumanReadableString(len);
|
||||||
ASSERT_OK(batch->Put(key, value));
|
ASSERT_OK(batch->Put(key, value));
|
||||||
if (data != nullptr) {
|
if (data != nullptr) {
|
||||||
(*data)[key] = value;
|
(*data)[key] = value;
|
||||||
@ -1079,7 +1080,7 @@ TEST_F(BlobDBTest, InlineSmallValues) {
|
|||||||
uint64_t expiration = rnd.Next() % kMaxExpiration;
|
uint64_t expiration = rnd.Next() % kMaxExpiration;
|
||||||
int len = is_small_value ? 50 : 200;
|
int len = is_small_value ? 50 : 200;
|
||||||
std::string key = "key" + ToString(i);
|
std::string key = "key" + ToString(i);
|
||||||
std::string value = test::RandomHumanReadableString(&rnd, len);
|
std::string value = rnd.HumanReadableString(len);
|
||||||
std::string blob_index;
|
std::string blob_index;
|
||||||
data[key] = value;
|
data[key] = value;
|
||||||
SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
@ -1186,8 +1187,7 @@ TEST_F(BlobDBTest, UserCompactionFilter) {
|
|||||||
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
||||||
|
|
||||||
const std::string key(oss.str());
|
const std::string key(oss.str());
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString((int)value_size);
|
||||||
test::RandomHumanReadableString(&rnd, (int)value_size));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(Put(key, value));
|
ASSERT_OK(Put(key, value));
|
||||||
@ -1264,8 +1264,7 @@ TEST_F(BlobDBTest, UserCompactionFilter_BlobIOError) {
|
|||||||
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
||||||
|
|
||||||
const std::string key(oss.str());
|
const std::string key(oss.str());
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString(kValueSize);
|
||||||
test::RandomHumanReadableString(&rnd, kValueSize));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(Put(key, value));
|
ASSERT_OK(Put(key, value));
|
||||||
@ -1319,7 +1318,7 @@ TEST_F(BlobDBTest, FilterExpiredBlobIndex) {
|
|||||||
uint64_t expiration = rnd.Next() % kMaxExpiration;
|
uint64_t expiration = rnd.Next() % kMaxExpiration;
|
||||||
int len = is_small_value ? 10 : 200;
|
int len = is_small_value ? 10 : 200;
|
||||||
std::string key = "key" + ToString(rnd.Next() % kNumKeys);
|
std::string key = "key" + ToString(rnd.Next() % kNumKeys);
|
||||||
std::string value = test::RandomHumanReadableString(&rnd, len);
|
std::string value = rnd.HumanReadableString(len);
|
||||||
if (!has_ttl) {
|
if (!has_ttl) {
|
||||||
if (is_small_value) {
|
if (is_small_value) {
|
||||||
std::string blob_entry;
|
std::string blob_entry;
|
||||||
@ -1440,7 +1439,7 @@ TEST_F(BlobDBTest, FilterForFIFOEviction) {
|
|||||||
// Insert some small values that will be inlined.
|
// Insert some small values that will be inlined.
|
||||||
for (int i = 0; i < 1000; i++) {
|
for (int i = 0; i < 1000; i++) {
|
||||||
std::string key = "key" + ToString(i);
|
std::string key = "key" + ToString(i);
|
||||||
std::string value = test::RandomHumanReadableString(&rnd, 50);
|
std::string value = rnd.HumanReadableString(50);
|
||||||
uint64_t ttl = rnd.Next() % 120 + 1;
|
uint64_t ttl = rnd.Next() % 120 + 1;
|
||||||
ASSERT_OK(PutWithTTL(key, value, ttl, &data));
|
ASSERT_OK(PutWithTTL(key, value, ttl, &data));
|
||||||
if (ttl >= 60) {
|
if (ttl >= 60) {
|
||||||
@ -1548,8 +1547,7 @@ TEST_F(BlobDBTest, GarbageCollection) {
|
|||||||
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
oss << "key" << std::setw(4) << std::setfill('0') << i;
|
||||||
|
|
||||||
const std::string key(oss.str());
|
const std::string key(oss.str());
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString(kLargeValueSize);
|
||||||
test::RandomHumanReadableString(&rnd, kLargeValueSize));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(Put(key, value));
|
ASSERT_OK(Put(key, value));
|
||||||
@ -1566,8 +1564,7 @@ TEST_F(BlobDBTest, GarbageCollection) {
|
|||||||
// First, add a large TTL value will be written to its own TTL blob file.
|
// First, add a large TTL value will be written to its own TTL blob file.
|
||||||
{
|
{
|
||||||
const std::string key("key2000");
|
const std::string key("key2000");
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString(kLargeValueSize);
|
||||||
test::RandomHumanReadableString(&rnd, kLargeValueSize));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(PutUntil(key, value, kExpiration));
|
ASSERT_OK(PutUntil(key, value, kExpiration));
|
||||||
@ -1583,8 +1580,7 @@ TEST_F(BlobDBTest, GarbageCollection) {
|
|||||||
// Now add a small TTL value (which will be inlined).
|
// Now add a small TTL value (which will be inlined).
|
||||||
{
|
{
|
||||||
const std::string key("key3000");
|
const std::string key("key3000");
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString(kSmallValueSize);
|
||||||
test::RandomHumanReadableString(&rnd, kSmallValueSize));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(PutUntil(key, value, kExpiration));
|
ASSERT_OK(PutUntil(key, value, kExpiration));
|
||||||
@ -1600,8 +1596,7 @@ TEST_F(BlobDBTest, GarbageCollection) {
|
|||||||
// value).
|
// value).
|
||||||
{
|
{
|
||||||
const std::string key("key4000");
|
const std::string key("key4000");
|
||||||
const std::string value(
|
const std::string value = rnd.HumanReadableString(kSmallValueSize);
|
||||||
test::RandomHumanReadableString(&rnd, kSmallValueSize));
|
|
||||||
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
|
||||||
|
|
||||||
ASSERT_OK(Put(key, value));
|
ASSERT_OK(Put(key, value));
|
||||||
|
@@ -9,6 +9,7 @@

 // Syncpoint prevents us building and running tests in release
 #ifndef ROCKSDB_LITE
+#include "rocksdb/utilities/checkpoint.h"

 #ifndef OS_WIN
 #include <unistd.h>
@@ -16,17 +17,18 @@
 #include <iostream>
 #include <thread>
 #include <utility>

 #include "db/db_impl/db_impl.h"
+#include "file/file_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/db.h"
 #include "rocksdb/env.h"
-#include "rocksdb/utilities/checkpoint.h"
 #include "rocksdb/utilities/transaction_db.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "utilities/fault_injection_env.h"

 namespace ROCKSDB_NAMESPACE {
 class CheckpointTest : public testing::Test {
@@ -69,7 +71,7 @@ class CheckpointTest : public testing::Test {
    env_->DeleteDir(snapshot_tmp_name);
    Reopen(options);
    export_path_ = test::PerThreadDBPath("/export");
-    test::DestroyDir(env_, export_path_);
+    DestroyDir(env_, export_path_);
    cfh_reverse_comp_ = nullptr;
    metadata_ = nullptr;
  }
@@ -94,7 +96,7 @@ class CheckpointTest : public testing::Test {
    options.db_paths.emplace_back(dbname_ + "_4", 0);
    EXPECT_OK(DestroyDB(dbname_, options));
    EXPECT_OK(DestroyDB(snapshot_name_, options));
-    test::DestroyDir(env_, export_path_);
+    DestroyDir(env_, export_path_);
  }

  // Return the current option configuration.
@@ -347,7 +349,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
                                            export_path_, &metadata_));
    verify_files_exported(*metadata_, 1);
    ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
-    test::DestroyDir(env_, export_path_);
+    DestroyDir(env_, export_path_);
    delete metadata_;
    metadata_ = nullptr;

@@ -358,7 +360,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
                                            export_path_, &metadata_));
    verify_files_exported(*metadata_, 2);
    ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
-    test::DestroyDir(env_, export_path_);
+    DestroyDir(env_, export_path_);
    delete metadata_;
    metadata_ = nullptr;
    delete checkpoint;
@@ -404,7 +406,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) {
  ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                           export_path_, &metadata_),
            Status::InvalidArgument("Specified export_dir exists"));
-  test::DestroyDir(env_, export_path_);
+  DestroyDir(env_, export_path_);

  // Export with invalid directory specification
  export_path_ = "";

@@ -11,10 +11,12 @@
 // the last "sync". It then checks for data loss errors by purposely dropping
 // file data (or entire files) not protected by a "sync".

-#include "test_util/fault_injection_test_env.h"
+#include "utilities/fault_injection_env.h"

 #include <functional>
 #include <utility>

+#include "util/random.h"
 namespace ROCKSDB_NAMESPACE {

 // Assume a filename, and not a directory name like "/foo/bar/"

@@ -17,16 +17,12 @@
 #include <set>
 #include <string>

-#include "db/version_set.h"
-#include "env/mock_env.h"
 #include "file/filename.h"
-#include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "util/mutexlock.h"
-#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {
+class Random;
 class TestWritableFile;
 class FaultInjectionTestEnv;

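Callers that used to include test_util/fault_injection_test_env.h now include the utilities header instead; a sketch of the usual wiring, assuming the class keeps its existing constructor that wraps a base Env (the class body is outside this hunk, and WireFaultEnv is a hypothetical helper):

    #include <memory>

    #include "rocksdb/env.h"
    #include "rocksdb/options.h"
    #include "utilities/fault_injection_env.h"

    namespace ROCKSDB_NAMESPACE {
    void WireFaultEnv(Options* options,
                      std::unique_ptr<FaultInjectionTestEnv>* fault_env) {
      // Wrap the default Env so a test can later simulate dropped writes or
      // an inactive filesystem through the fault-injection hooks.
      fault_env->reset(new FaultInjectionTestEnv(Env::Default()));
      options->env = fault_env->get();
    }
    }  // namespace ROCKSDB_NAMESPACE
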
@@ -14,11 +14,15 @@
 // FileSystem related operations, by specify the "IOStatus Error", a specific
 // error can be returned when file system is not activated.

-#include "test_util/fault_injection_test_fs.h"
+#include "utilities/fault_injection_fs.h"

 #include <functional>
 #include <utility>

+#include "env/composite_env_wrapper.h"
 #include "port/lang.h"
 #include "port/stack_trace.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

@@ -501,8 +505,7 @@ IOStatus FaultInjectionTestFS::InjectError(ErrorOperation op,
        // The randomly generated string could be identical to the
        // original one, so retry
        do {
-          str = DBTestBase::RandomString(&ctx->rand,
-                                         static_cast<int>(len));
+          str = ctx->rand.RandomString(static_cast<int>(len));
        } while (str == std::string(scratch + offset, len));
        memcpy(scratch + offset, str.data(), len);
        break;

@@ -21,15 +21,11 @@
 #include <set>
 #include <string>

-#include "db/db_test_util.h"
-#include "db/version_set.h"
-#include "env/mock_env.h"
 #include "file/filename.h"
 #include "include/rocksdb/file_system.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
 #include "util/mutexlock.h"
 #include "util/random.h"
+#include "util/thread_local.h"

 namespace ROCKSDB_NAMESPACE {

@@ -13,6 +13,7 @@
 #include "table/block_based/block_based_table_factory.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
+#include "util/random.h"
 #include "util/string_util.h"

 namespace ROCKSDB_NAMESPACE {
@@ -25,12 +26,6 @@ class MemoryTest : public testing::Test {

  std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }

-  std::string RandomString(int len) {
-    std::string r;
-    test::RandomString(&rnd_, len, &r);
-    return r;
-  }
-
  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
@@ -122,9 +117,9 @@ TEST_F(MemoryTest, SharedBlockCacheTotal) {
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
-        keys_by_db[i].emplace_back(RandomString(kKeySize));
+        keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
        dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
-                    RandomString(kValueSize));
+                    rnd_.RandomString(kValueSize));
      }
      dbs[i]->Flush(FlushOptions());
    }
@@ -181,8 +176,8 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
-        dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize),
-                    RandomString(kValueSize));
+        dbs[i]->Put(WriteOptions(), handle, rnd_.RandomString(kKeySize),
+                    rnd_.RandomString(kValueSize));
        UpdateUsagesHistory(dbs);
      }
    }
@@ -208,7 +203,7 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {

    for (int j = 0; j < 100; ++j) {
      std::string value;
-      dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value);
+      dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value);
    }

    UpdateUsagesHistory(dbs);

@@ -8,9 +8,13 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

 #include "rocksdb/utilities/option_change_migration.h"

 #include <set>

 #include "db/db_test_util.h"
 #include "port/stack_trace.h"
+#include "util/random.h"

 namespace ROCKSDB_NAMESPACE {

 class DBOptionChangeMigrationTests
@@ -200,7 +204,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
  Random rnd(301);
  for (int num = 0; num < 20; num++) {
    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
+      ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
    }
    Flush();
    dbfull()->TEST_WaitForCompact();
@@ -274,7 +278,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
  Random rnd(301);
  for (int num = 0; num < 20; num++) {
    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
+      ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
    }
    Flush();
    dbfull()->TEST_WaitForCompact();
@@ -370,7 +374,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
  Random rnd(301);
  for (int num = 0; num < 20; num++) {
    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
+      ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
    }
  }
  Flush();

@@ -24,6 +24,7 @@
 #include "rocksdb/cache.h"
 #include "table/block_based/block_builder.h"
 #include "test_util/testharness.h"
+#include "util/random.h"
 #include "utilities/persistent_cache/volatile_tier_impl.h"

 namespace ROCKSDB_NAMESPACE {
@@ -255,7 +256,7 @@ class PersistentCacheDBTest : public DBTestBase {
    std::string str;
    for (int i = 0; i < num_iter; i++) {
      if (i % 4 == 0) {  // high compression ratio
-        str = RandomString(&rnd, 1000);
+        str = rnd.RandomString(1000);
      }
      values->push_back(str);
      ASSERT_OK(Put(1, Key(i), (*values)[i]));

@@ -6,6 +6,8 @@
 #ifndef ROCKSDB_LITE

 #include "utilities/transactions/transaction_lock_mgr.h"
+
+#include "file/file_util.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
 #include "rocksdb/utilities/transaction_db.h"
@@ -36,7 +38,7 @@ class TransactionLockMgrTest : public testing::Test {

  void TearDown() override {
    delete db_;
-    EXPECT_OK(test::DestroyDir(env_, db_dir_));
+    EXPECT_OK(DestroyDir(env_, db_dir_));
  }

  PessimisticTransaction* NewTxn(

@ -13,25 +13,24 @@
|
|||||||
#include <thread>
|
#include <thread>
|
||||||
|
|
||||||
#include "db/db_impl/db_impl.h"
|
#include "db/db_impl/db_impl.h"
|
||||||
|
#include "port/port.h"
|
||||||
#include "rocksdb/db.h"
|
#include "rocksdb/db.h"
|
||||||
#include "rocksdb/options.h"
|
#include "rocksdb/options.h"
|
||||||
#include "rocksdb/perf_context.h"
|
#include "rocksdb/perf_context.h"
|
||||||
#include "rocksdb/utilities/transaction.h"
|
#include "rocksdb/utilities/transaction.h"
|
||||||
#include "rocksdb/utilities/transaction_db.h"
|
#include "rocksdb/utilities/transaction_db.h"
|
||||||
#include "table/mock_table.h"
|
#include "table/mock_table.h"
|
||||||
#include "test_util/fault_injection_test_env.h"
|
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
#include "test_util/testharness.h"
|
#include "test_util/testharness.h"
|
||||||
#include "test_util/testutil.h"
|
#include "test_util/testutil.h"
|
||||||
#include "test_util/transaction_test_util.h"
|
#include "test_util/transaction_test_util.h"
|
||||||
#include "util/random.h"
|
#include "util/random.h"
|
||||||
#include "util/string_util.h"
|
#include "util/string_util.h"
|
||||||
|
#include "utilities/fault_injection_env.h"
|
||||||
#include "utilities/merge_operators.h"
|
#include "utilities/merge_operators.h"
|
||||||
#include "utilities/merge_operators/string_append/stringappend.h"
|
#include "utilities/merge_operators/string_append/stringappend.h"
|
||||||
#include "utilities/transactions/pessimistic_transaction_db.h"
|
#include "utilities/transactions/pessimistic_transaction_db.h"
|
||||||
|
|
||||||
#include "port/port.h"
|
|
||||||
|
|
||||||
using std::string;
|
using std::string;
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
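
Editor's note: this hunk, and the similar ones that follow, retire test_util/fault_injection_test_env.h in favor of utilities/fault_injection_env.h, the new location of FaultInjectionTestEnv now that it lives under utilities rather than test code; the port/port.h include is also folded into the sorted block. A minimal, hedged sketch of using the class from its new header; the database path, options, and method sequence are illustrative rather than lifted from the patch:

    #include <memory>
    #include "rocksdb/db.h"
    #include "utilities/fault_injection_env.h"  // FaultInjectionTestEnv's new home

    // Hypothetical snippet: open a DB on top of the fault-injection Env.
    void OpenWithFaultInjection() {
      using namespace ROCKSDB_NAMESPACE;
      std::unique_ptr<FaultInjectionTestEnv> fault_env(
          new FaultInjectionTestEnv(Env::Default()));
      Options options;
      options.create_if_missing = true;
      options.env = fault_env.get();
      DB* db = nullptr;
      Status s = DB::Open(options, "/tmp/fault_injection_example", &db);
      if (s.ok()) {
        s = db->Put(WriteOptions(), "key", "value");
        // Simulate a crash: drop whatever was written but never synced.
        fault_env->SetFilesystemActive(false);
        fault_env->DropUnsyncedFileData();
        fault_env->SetFilesystemActive(true);
        delete db;
      }
    }
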
@@ -12,25 +12,24 @@
 #include <thread>
 
 #include "db/db_impl/db_impl.h"
+#include "port/port.h"
 #include "rocksdb/db.h"
 #include "rocksdb/options.h"
 #include "rocksdb/utilities/transaction.h"
 #include "rocksdb/utilities/transaction_db.h"
 #include "table/mock_table.h"
-#include "test_util/fault_injection_test_env.h"
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
 #include "test_util/transaction_test_util.h"
 #include "util/random.h"
 #include "util/string_util.h"
+#include "utilities/fault_injection_env.h"
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/string_append/stringappend.h"
 #include "utilities/transactions/pessimistic_transaction_db.h"
 #include "utilities/transactions/write_unprepared_txn_db.h"
 
-#include "port/port.h"
-
 namespace ROCKSDB_NAMESPACE {
 
 // Return true if the ith bit is set in combination represented by comb
@@ -5,8 +5,6 @@
 
 #ifndef ROCKSDB_LITE
 
-#include "utilities/transactions/transaction_test.h"
-
 #include <algorithm>
 #include <atomic>
 #include <cinttypes>
@ -16,6 +14,7 @@
|
|||||||
|
|
||||||
#include "db/db_impl/db_impl.h"
|
#include "db/db_impl/db_impl.h"
|
||||||
#include "db/dbformat.h"
|
#include "db/dbformat.h"
|
||||||
|
#include "port/port.h"
|
||||||
#include "rocksdb/db.h"
|
#include "rocksdb/db.h"
|
||||||
#include "rocksdb/options.h"
|
#include "rocksdb/options.h"
|
||||||
#include "rocksdb/types.h"
|
#include "rocksdb/types.h"
|
||||||
@ -23,7 +22,6 @@
|
|||||||
#include "rocksdb/utilities/transaction.h"
|
#include "rocksdb/utilities/transaction.h"
|
||||||
#include "rocksdb/utilities/transaction_db.h"
|
#include "rocksdb/utilities/transaction_db.h"
|
||||||
#include "table/mock_table.h"
|
#include "table/mock_table.h"
|
||||||
#include "test_util/fault_injection_test_env.h"
|
|
||||||
#include "test_util/sync_point.h"
|
#include "test_util/sync_point.h"
|
||||||
#include "test_util/testharness.h"
|
#include "test_util/testharness.h"
|
||||||
#include "test_util/testutil.h"
|
#include "test_util/testutil.h"
|
||||||
@ -31,13 +29,13 @@
|
|||||||
#include "util/mutexlock.h"
|
#include "util/mutexlock.h"
|
||||||
#include "util/random.h"
|
#include "util/random.h"
|
||||||
#include "util/string_util.h"
|
#include "util/string_util.h"
|
||||||
|
#include "utilities/fault_injection_env.h"
|
||||||
#include "utilities/merge_operators.h"
|
#include "utilities/merge_operators.h"
|
||||||
#include "utilities/merge_operators/string_append/stringappend.h"
|
#include "utilities/merge_operators/string_append/stringappend.h"
|
||||||
#include "utilities/transactions/pessimistic_transaction_db.h"
|
#include "utilities/transactions/pessimistic_transaction_db.h"
|
||||||
|
#include "utilities/transactions/transaction_test.h"
|
||||||
#include "utilities/transactions/write_prepared_txn_db.h"
|
#include "utilities/transactions/write_prepared_txn_db.h"
|
||||||
|
|
||||||
#include "port/port.h"
|
|
||||||
|
|
||||||
using std::string;
|
using std::string;
|
||||||
|
|
||||||
namespace ROCKSDB_NAMESPACE {
|
namespace ROCKSDB_NAMESPACE {
|
||||||
@@ -277,8 +275,8 @@ TEST(WriteBatchWithIndex, SubBatchCnt) {
     for (size_t k = 0; k < 10; k++) { // 10 key per batch
       size_t ki = static_cast<size_t>(rnd.Uniform(TOTAL_KEYS));
       Slice key = Slice(keys[ki]);
-      std::string buffer;
-      Slice value = Slice(test::RandomString(&rnd, 16, &buffer));
+      std::string tmp = rnd.RandomString(16);
+      Slice value = Slice(tmp);
       rndbatch.Put(key, value);
     }
     SubBatchCounter batch_counter(comparators);
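
Editor's note: the last hunk shows the other half of the RandomString move: Random::RandomString returns its bytes as a std::string, so the scratch buffer that test::RandomString needed is gone. A hedged sketch of the resulting lifetime pattern; the names are illustrative:

    #include <string>
    #include "rocksdb/slice.h"
    #include "util/random.h"

    // Hypothetical snippet: build a Slice over a freshly generated value.
    void PutRandomValueExample() {
      ROCKSDB_NAMESPACE::Random rnd(301);
      // Old: std::string buffer; Slice value(test::RandomString(&rnd, 16, &buffer));
      std::string tmp = rnd.RandomString(16);  // tmp owns the bytes
      ROCKSDB_NAMESPACE::Slice value(tmp);     // Slice only points into tmp
      (void)value;  // keep tmp alive for as long as the Slice is used
    }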