Move reusable part of db_test.cc to util/db_test_util.h
Summary: Move the reusable part of db_test.cc to util/db_test_util.h. This makes it
possible to partition db_test.cc into multiple smaller test files. Also fixed many
old lint errors in db_test.

Test Plan: db_test

Reviewers: igor, anthony, IslamAbdelRahman, sdong, kradhakrishnan

Reviewed By: sdong, kradhakrishnan

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D41973
parent e8e8c90499
commit 625467a08a
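The shared header lets later commits split db_test.cc into focused test files that reuse the same fixture. A minimal sketch of what such a partitioned test file could look like (the file name db_compaction_test.cc and the test body are hypothetical illustrations, not part of this commit):

// Hypothetical db/db_compaction_test.cc, reusing the fixture moved in this
// commit. Everything here besides DBTestBase and its helpers is illustrative.
#include "util/db_test_util.h"

namespace rocksdb {

class DBCompactionTest : public DBTestBase {
 public:
  DBCompactionTest() : DBTestBase("/db_compaction_test") {}
};

TEST_F(DBCompactionTest, PutFlushGet) {
  Options options = CurrentOptions();
  DestroyAndReopen(options);
  ASSERT_OK(Put("foo", "v1"));   // helper from DBTestBase
  ASSERT_OK(Flush());            // flush the memtable to an sst file
  ASSERT_EQ("v1", Get("foo"));
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}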
Makefile (2 lines changed)

@@ -672,7 +672,7 @@ slice_transform_test: util/slice_transform_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
 
-db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+db_test: db/db_test.o util/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
 
 db_iter_test: db/db_iter_test.o $(LIBOBJECTS) $(TESTHARNESS)
db/db_test.cc (1296 lines changed)

File diff suppressed because it is too large.
src.mk (1 line changed)

@@ -206,6 +206,7 @@ TEST_BENCH_SOURCES = \
   util/cache_test.cc \
   util/coding_test.cc \
   util/crc32c_test.cc \
+  util/db_test_util.cc \
   util/dynamic_bloom_test.cc \
   util/env_test.cc \
   util/filelock_test.cc \
util/db_test_util.cc (new file, 916 lines)

@@ -0,0 +1,916 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "util/db_test_util.h"

namespace rocksdb {

// Special Env used to delay background operations

SpecialEnv::SpecialEnv(Env* base)
    : EnvWrapper(base),
      rnd_(301),
      sleep_counter_(this),
      addon_time_(0),
      no_sleep_(false) {
  delay_sstable_sync_.store(false, std::memory_order_release);
  drop_writes_.store(false, std::memory_order_release);
  no_space_.store(false, std::memory_order_release);
  non_writable_.store(false, std::memory_order_release);
  count_random_reads_ = false;
  count_sequential_reads_ = false;
  manifest_sync_error_.store(false, std::memory_order_release);
  manifest_write_error_.store(false, std::memory_order_release);
  log_write_error_.store(false, std::memory_order_release);
  log_write_slowdown_ = 0;
  bytes_written_ = 0;
  sync_counter_ = 0;
  non_writeable_rate_ = 0;
  new_writable_count_ = 0;
  non_writable_count_ = 0;
  table_write_callback_ = nullptr;
}

DBTestBase::DBTestBase(const std::string path)
    : option_config_(kDefault),
      mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
      env_(new SpecialEnv(mem_env_ ? mem_env_ : Env::Default())) {
  env_->SetBackgroundThreads(1, Env::LOW);
  env_->SetBackgroundThreads(1, Env::HIGH);
  dbname_ = test::TmpDir(env_) + path;
  alternative_wal_dir_ = dbname_ + "/wal";
  auto options = CurrentOptions();
  auto delete_options = options;
  delete_options.wal_dir = alternative_wal_dir_;
  EXPECT_OK(DestroyDB(dbname_, delete_options));
  // Destroy again in case the alternative WAL dir was not used.
  EXPECT_OK(DestroyDB(dbname_, options));
  db_ = nullptr;
  Reopen(options);
}

DBTestBase::~DBTestBase() {
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->LoadDependency({});
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
  Close();
  Options options;
  options.db_paths.emplace_back(dbname_, 0);
  options.db_paths.emplace_back(dbname_ + "_2", 0);
  options.db_paths.emplace_back(dbname_ + "_3", 0);
  options.db_paths.emplace_back(dbname_ + "_4", 0);
  EXPECT_OK(DestroyDB(dbname_, options));
  delete env_;
}

// Switch to a fresh database with the next option configuration to
// test. Return false if there are no more configurations to test.
bool DBTestBase::ChangeOptions(int skip_mask) {
  for (option_config_++; option_config_ < kEnd; option_config_++) {
    if ((skip_mask & kSkipDeletesFilterFirst) &&
        option_config_ == kDeletesFilterFirst) {
      continue;
    }
    if ((skip_mask & kSkipUniversalCompaction) &&
        (option_config_ == kUniversalCompaction ||
         option_config_ == kUniversalCompactionMultiLevel)) {
      continue;
    }
    if ((skip_mask & kSkipMergePut) && option_config_ == kMergePut) {
      continue;
    }
    if ((skip_mask & kSkipNoSeekToLast) &&
        (option_config_ == kHashLinkList ||
         option_config_ == kHashSkipList)) {
      continue;
    }
    if ((skip_mask & kSkipPlainTable) &&
        (option_config_ == kPlainTableAllBytesPrefix ||
         option_config_ == kPlainTableFirstBytePrefix ||
         option_config_ == kPlainTableCappedPrefix)) {
      continue;
    }
    if ((skip_mask & kSkipHashIndex) &&
        (option_config_ == kBlockBasedTableWithPrefixHashIndex ||
         option_config_ == kBlockBasedTableWithWholeKeyHashIndex)) {
      continue;
    }
    if ((skip_mask & kSkipHashCuckoo) && (option_config_ == kHashCuckoo)) {
      continue;
    }
    if ((skip_mask & kSkipFIFOCompaction) &&
        option_config_ == kFIFOCompaction) {
      continue;
    }
    if ((skip_mask & kSkipMmapReads) &&
        option_config_ == kWalDirAndMmapReads) {
      continue;
    }
    break;
  }

  if (option_config_ >= kEnd) {
    Destroy(last_options_);
    return false;
  } else {
    auto options = CurrentOptions();
    options.create_if_missing = true;
    DestroyAndReopen(options);
    return true;
  }
}

// Switch between different compaction styles (we have only 2 now).
bool DBTestBase::ChangeCompactOptions() {
  if (option_config_ == kDefault) {
    option_config_ = kUniversalCompaction;
    Destroy(last_options_);
    auto options = CurrentOptions();
    options.create_if_missing = true;
    TryReopen(options);
    return true;
  } else if (option_config_ == kUniversalCompaction) {
    option_config_ = kUniversalCompactionMultiLevel;
    Destroy(last_options_);
    auto options = CurrentOptions();
    options.create_if_missing = true;
    TryReopen(options);
    return true;
  } else {
    return false;
  }
}

// Switch between different filter policies.
// Jump from kDefault to kFilter to kFullFilter.
bool DBTestBase::ChangeFilterOptions() {
  if (option_config_ == kDefault) {
    option_config_ = kFilter;
  } else if (option_config_ == kFilter) {
    option_config_ = kFullFilter;
  } else {
    return false;
  }
  Destroy(last_options_);

  auto options = CurrentOptions();
  options.create_if_missing = true;
  TryReopen(options);
  return true;
}

// Return the current option configuration.
Options DBTestBase::CurrentOptions(
    const anon::OptionsOverride& options_override) {
  Options options;
  return CurrentOptions(options, options_override);
}

Options DBTestBase::CurrentOptions(
    const Options& defaultOptions,
    const anon::OptionsOverride& options_override) {
  // This redundant copy is to minimize code change w/o having lint error.
  Options options = defaultOptions;
  XFUNC_TEST("", "dbtest_options", inplace_options1, GetXFTestOptions,
             reinterpret_cast<Options*>(&options),
             options_override.skip_policy);
  BlockBasedTableOptions table_options;
  bool set_block_based_table_factory = true;
  switch (option_config_) {
    case kHashSkipList:
      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
      options.memtable_factory.reset(NewHashSkipListRepFactory(16));
      break;
    case kPlainTableFirstBytePrefix:
      options.table_factory.reset(new PlainTableFactory());
      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
      options.allow_mmap_reads = true;
      options.max_sequential_skip_in_iterations = 999999;
      set_block_based_table_factory = false;
      break;
    case kPlainTableCappedPrefix:
      options.table_factory.reset(new PlainTableFactory());
      options.prefix_extractor.reset(NewCappedPrefixTransform(8));
      options.allow_mmap_reads = true;
      options.max_sequential_skip_in_iterations = 999999;
      set_block_based_table_factory = false;
      break;
    case kPlainTableAllBytesPrefix:
      options.table_factory.reset(new PlainTableFactory());
      options.prefix_extractor.reset(NewNoopTransform());
      options.allow_mmap_reads = true;
      options.max_sequential_skip_in_iterations = 999999;
      set_block_based_table_factory = false;
      break;
    case kMergePut:
      options.merge_operator = MergeOperators::CreatePutOperator();
      break;
    case kFilter:
      table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
      break;
    case kFullFilter:
      table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
      break;
    case kUncompressed:
      options.compression = kNoCompression;
      break;
    case kNumLevel_3:
      options.num_levels = 3;
      break;
    case kDBLogDir:
      options.db_log_dir = test::TmpDir(env_);
      break;
    case kWalDirAndMmapReads:
      options.wal_dir = alternative_wal_dir_;
      // mmap reads should be orthogonal to WalDir setting, so we piggyback to
      // this option config to test mmap reads as well
      options.allow_mmap_reads = true;
      break;
    case kManifestFileSize:
      options.max_manifest_file_size = 50;  // 50 bytes
    case kCompactOnFlush:
      options.purge_redundant_kvs_while_flush =
          !options.purge_redundant_kvs_while_flush;
      break;
    case kPerfOptions:
      options.soft_rate_limit = 2.0;
      options.delayed_write_rate = 8 * 1024 * 1024;
      // TODO(3.13) -- test more options
      break;
    case kDeletesFilterFirst:
      options.filter_deletes = true;
      break;
    case kVectorRep:
      options.memtable_factory.reset(new VectorRepFactory(100));
      break;
    case kHashLinkList:
      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
      options.memtable_factory.reset(
          NewHashLinkListRepFactory(4, 0, 3, true, 4));
      break;
    case kHashCuckoo:
      options.memtable_factory.reset(
          NewHashCuckooRepFactory(options.write_buffer_size));
      break;
    case kUniversalCompaction:
      options.compaction_style = kCompactionStyleUniversal;
      options.num_levels = 1;
      break;
    case kUniversalCompactionMultiLevel:
      options.compaction_style = kCompactionStyleUniversal;
      options.num_levels = 8;
      break;
    case kCompressedBlockCache:
      options.allow_mmap_writes = true;
      table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
      break;
    case kInfiniteMaxOpenFiles:
      options.max_open_files = -1;
      break;
    case kxxHashChecksum: {
      table_options.checksum = kxxHash;
      break;
    }
    case kFIFOCompaction: {
      options.compaction_style = kCompactionStyleFIFO;
      break;
    }
    case kBlockBasedTableWithPrefixHashIndex: {
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
      break;
    }
    case kBlockBasedTableWithWholeKeyHashIndex: {
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      options.prefix_extractor.reset(NewNoopTransform());
      break;
    }
    case kOptimizeFiltersForHits: {
      options.optimize_filters_for_hits = true;
      set_block_based_table_factory = true;
      break;
    }
    case kRowCache: {
      options.row_cache = NewLRUCache(1024 * 1024);
      break;
    }

    default:
      break;
  }

  if (options_override.filter_policy) {
    table_options.filter_policy = options_override.filter_policy;
  }
  if (set_block_based_table_factory) {
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }
  options.env = env_;
  options.create_if_missing = true;
  return options;
}

void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
                                      const Options& options) {
  ColumnFamilyOptions cf_opts(options);
  size_t cfi = handles_.size();
  handles_.resize(cfi + cfs.size());
  for (auto cf : cfs) {
    ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
  }
}

void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                                       const Options& options) {
  CreateColumnFamilies(cfs, options);
  std::vector<std::string> cfs_plus_default = cfs;
  cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
  ReopenWithColumnFamilies(cfs_plus_default, options);
}

void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                          const std::vector<Options>& options) {
  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
}

void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                          const Options& options) {
  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
}

Status DBTestBase::TryReopenWithColumnFamilies(
    const std::vector<std::string>& cfs,
    const std::vector<Options>& options) {
  Close();
  EXPECT_EQ(cfs.size(), options.size());
  std::vector<ColumnFamilyDescriptor> column_families;
  for (size_t i = 0; i < cfs.size(); ++i) {
    column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
  }
  DBOptions db_opts = DBOptions(options[0]);
  return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
}

Status DBTestBase::TryReopenWithColumnFamilies(
    const std::vector<std::string>& cfs,
    const Options& options) {
  Close();
  std::vector<Options> v_opts(cfs.size(), options);
  return TryReopenWithColumnFamilies(cfs, v_opts);
}

void DBTestBase::Reopen(const Options& options) {
  ASSERT_OK(TryReopen(options));
}

void DBTestBase::Close() {
  for (auto h : handles_) {
    delete h;
  }
  handles_.clear();
  delete db_;
  db_ = nullptr;
}

void DBTestBase::DestroyAndReopen(const Options& options) {
  // Destroy using last options
  Destroy(last_options_);
  ASSERT_OK(TryReopen(options));
}

void DBTestBase::Destroy(const Options& options) {
  Close();
  ASSERT_OK(DestroyDB(dbname_, options));
}

Status DBTestBase::ReadOnlyReopen(const Options& options) {
  return DB::OpenForReadOnly(options, dbname_, &db_);
}

Status DBTestBase::TryReopen(const Options& options) {
  Close();
  last_options_ = options;
  return DB::Open(options, dbname_, &db_);
}

Status DBTestBase::Flush(int cf) {
  if (cf == 0) {
    return db_->Flush(FlushOptions());
  } else {
    return db_->Flush(FlushOptions(), handles_[cf]);
  }
}

Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
  if (kMergePut == option_config_) {
    return db_->Merge(wo, k, v);
  } else {
    return db_->Put(wo, k, v);
  }
}

Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
                       WriteOptions wo) {
  if (kMergePut == option_config_) {
    return db_->Merge(wo, handles_[cf], k, v);
  } else {
    return db_->Put(wo, handles_[cf], k, v);
  }
}

Status DBTestBase::Delete(const std::string& k) {
  return db_->Delete(WriteOptions(), k);
}

Status DBTestBase::Delete(int cf, const std::string& k) {
  return db_->Delete(WriteOptions(), handles_[cf], k);
}

std::string DBTestBase::Get(const std::string& k, const Snapshot* snapshot) {
  ReadOptions options;
  options.verify_checksums = true;
  options.snapshot = snapshot;
  std::string result;
  Status s = db_->Get(options, k, &result);
  if (s.IsNotFound()) {
    result = "NOT_FOUND";
  } else if (!s.ok()) {
    result = s.ToString();
  }
  return result;
}

std::string DBTestBase::Get(int cf, const std::string& k,
                            const Snapshot* snapshot) {
  ReadOptions options;
  options.verify_checksums = true;
  options.snapshot = snapshot;
  std::string result;
  Status s = db_->Get(options, handles_[cf], k, &result);
  if (s.IsNotFound()) {
    result = "NOT_FOUND";
  } else if (!s.ok()) {
    result = s.ToString();
  }
  return result;
}

uint64_t DBTestBase::GetNumSnapshots() {
  uint64_t int_num;
  EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
  return int_num;
}

uint64_t DBTestBase::GetTimeOldestSnapshots() {
  uint64_t int_num;
  EXPECT_TRUE(
      dbfull()->GetIntProperty("rocksdb.oldest-snapshot-time", &int_num));
  return int_num;
}

// Return a string that contains all key,value pairs in order,
// formatted like "(k1->v1)(k2->v2)".
std::string DBTestBase::Contents(int cf) {
  std::vector<std::string> forward;
  std::string result;
  Iterator* iter = (cf == 0) ? db_->NewIterator(ReadOptions())
                             : db_->NewIterator(ReadOptions(), handles_[cf]);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    std::string s = IterStatus(iter);
    result.push_back('(');
    result.append(s);
    result.push_back(')');
    forward.push_back(s);
  }

  // Check reverse iteration results are the reverse of forward results
  unsigned int matched = 0;
  for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
    EXPECT_LT(matched, forward.size());
    EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
    matched++;
  }
  EXPECT_EQ(matched, forward.size());

  delete iter;
  return result;
}

std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
  Arena arena;
  ScopedArenaIterator iter;
  if (cf == 0) {
    iter.set(dbfull()->TEST_NewInternalIterator(&arena));
  } else {
    iter.set(dbfull()->TEST_NewInternalIterator(&arena, handles_[cf]));
  }
  InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
  iter->Seek(target.Encode());
  std::string result;
  if (!iter->status().ok()) {
    result = iter->status().ToString();
  } else {
    result = "[ ";
    bool first = true;
    while (iter->Valid()) {
      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
      if (!ParseInternalKey(iter->key(), &ikey)) {
        result += "CORRUPTED";
      } else {
        if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
          break;
        }
        if (!first) {
          result += ", ";
        }
        first = false;
        switch (ikey.type) {
          case kTypeValue:
            result += iter->value().ToString();
            break;
          case kTypeMerge:
            // keep it the same as kTypeValue for testing kMergePut
            result += iter->value().ToString();
            break;
          case kTypeDeletion:
            result += "DEL";
            break;
          default:
            assert(false);
            break;
        }
      }
      iter->Next();
    }
    if (!first) {
      result += " ";
    }
    result += "]";
  }
  return result;
}

int DBTestBase::NumSortedRuns(int cf) {
  ColumnFamilyMetaData cf_meta;
  if (cf == 0) {
    db_->GetColumnFamilyMetaData(&cf_meta);
  } else {
    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
  }
  int num_sr = static_cast<int>(cf_meta.levels[0].files.size());
  for (size_t i = 1U; i < cf_meta.levels.size(); i++) {
    if (cf_meta.levels[i].files.size() > 0) {
      num_sr++;
    }
  }
  return num_sr;
}

uint64_t DBTestBase::TotalSize(int cf) {
  ColumnFamilyMetaData cf_meta;
  if (cf == 0) {
    db_->GetColumnFamilyMetaData(&cf_meta);
  } else {
    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
  }
  return cf_meta.size;
}

int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
  std::string property;
  if (cf == 0) {
    // default cfd
    EXPECT_TRUE(db_->GetProperty(
        "rocksdb.num-files-at-level" + NumberToString(level), &property));
  } else {
    EXPECT_TRUE(db_->GetProperty(
        handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
        &property));
  }
  return atoi(property.c_str());
}

uint64_t DBTestBase::SizeAtLevel(int level) {
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  uint64_t sum = 0;
  for (const auto& m : metadata) {
    if (m.level == level) {
      sum += m.size;
    }
  }
  return sum;
}

int DBTestBase::TotalLiveFiles(int cf) {
  ColumnFamilyMetaData cf_meta;
  if (cf == 0) {
    db_->GetColumnFamilyMetaData(&cf_meta);
  } else {
    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
  }
  int num_files = 0;
  for (auto& level : cf_meta.levels) {
    num_files += level.files.size();
  }
  return num_files;
}

int DBTestBase::TotalTableFiles(int cf, int levels) {
  if (levels == -1) {
    levels = CurrentOptions().num_levels;
  }
  int result = 0;
  for (int level = 0; level < levels; level++) {
    result += NumTableFilesAtLevel(level, cf);
  }
  return result;
}

// Return spread of files per level
std::string DBTestBase::FilesPerLevel(int cf) {
  int num_levels =
      (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
  std::string result;
  size_t last_non_zero_offset = 0;
  for (int level = 0; level < num_levels; level++) {
    int f = NumTableFilesAtLevel(level, cf);
    char buf[100];
    snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
    result += buf;
    if (f > 0) {
      last_non_zero_offset = result.size();
    }
  }
  result.resize(last_non_zero_offset);
  return result;
}

size_t DBTestBase::CountFiles() {
  std::vector<std::string> files;
  env_->GetChildren(dbname_, &files);

  std::vector<std::string> logfiles;
  if (dbname_ != last_options_.wal_dir) {
    env_->GetChildren(last_options_.wal_dir, &logfiles);
  }

  return files.size() + logfiles.size();
}

size_t DBTestBase::CountLiveFiles() {
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  return metadata.size();
}

uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
  Range r(start, limit);
  uint64_t size;
  if (cf == 0) {
    db_->GetApproximateSizes(&r, 1, &size);
  } else {
    db_->GetApproximateSizes(handles_[1], &r, 1, &size);
  }
  return size;
}

void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
                         uint32_t target_path_id) {
  CompactRangeOptions compact_options;
  compact_options.target_path_id = target_path_id;
  ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
}

void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit) {
  ASSERT_OK(
      db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
}

void DBTestBase::Compact(const Slice& start, const Slice& limit) {
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
}

// Do n memtable compactions, each of which produces an sstable
// covering the range [small,large].
void DBTestBase::MakeTables(int n, const std::string& small,
                            const std::string& large, int cf) {
  for (int i = 0; i < n; i++) {
    ASSERT_OK(Put(cf, small, "begin"));
    ASSERT_OK(Put(cf, large, "end"));
    ASSERT_OK(Flush(cf));
  }
}

// Prevent pushing of new sstables into deeper levels by adding
// tables that cover a specified range to all levels.
void DBTestBase::FillLevels(const std::string& smallest,
                            const std::string& largest, int cf) {
  MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
}

void DBTestBase::DumpFileCounts(const char* label) {
  fprintf(stderr, "---\n%s:\n", label);
  fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
          dbfull()->TEST_MaxNextLevelOverlappingBytes());
  for (int level = 0; level < db_->NumberLevels(); level++) {
    int num = NumTableFilesAtLevel(level);
    if (num > 0) {
      fprintf(stderr, "  level %3d : %d files\n", level, num);
    }
  }
}

std::string DBTestBase::DumpSSTableList() {
  std::string property;
  db_->GetProperty("rocksdb.sstables", &property);
  return property;
}

int DBTestBase::GetSstFileCount(std::string path) {
  std::vector<std::string> files;
  env_->GetChildren(path, &files);

  int sst_count = 0;
  uint64_t number;
  FileType type;
  for (size_t i = 0; i < files.size(); i++) {
    if (ParseFileName(files[i], &number, &type) && type == kTableFile) {
      sst_count++;
    }
  }
  return sst_count;
}

// this will generate non-overlapping files since it keeps increasing key_idx
void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 10) ? 1 : 10000)));
    (*key_idx)++;
  }
  if (!nowait) {
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
}

void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 1000)));
  }
  ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 1)));
  if (!nowait) {
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
}

std::string DBTestBase::IterStatus(Iterator* iter) {
  std::string result;
  if (iter->Valid()) {
    result = iter->key().ToString() + "->" + iter->value().ToString();
  } else {
    result = "(invalid)";
  }
  return result;
}

Options DBTestBase::OptionsForLogIterTest() {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.WAL_ttl_seconds = 1000;
  return options;
}

std::unique_ptr<TransactionLogIterator> DBTestBase::OpenTransactionLogIter(
    const SequenceNumber seq) {
  unique_ptr<TransactionLogIterator> iter;
  Status status = dbfull()->GetUpdatesSince(seq, &iter);
  EXPECT_OK(status);
  EXPECT_TRUE(iter->Valid());
  return std::move(iter);
}

std::string DBTestBase::DummyString(size_t len, char c) {
  return std::string(len, c);
}

void DBTestBase::VerifyIterLast(std::string expected_key, int cf) {
  Iterator* iter;
  ReadOptions ro;
  if (cf == 0) {
    iter = db_->NewIterator(ro);
  } else {
    iter = db_->NewIterator(ro, handles_[cf]);
  }
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), expected_key);
  delete iter;
}

// Used to test InplaceUpdate

// If the previous value is nullptr or delta is larger than the previous
// value, sets newValue with delta.
// If the previous value is not empty,
// updates the previous value with a 'b' string of previous value size - 1.
UpdateStatus DBTestBase::updateInPlaceSmallerSize(
    char* prevValue, uint32_t* prevSize,
    Slice delta, std::string* newValue) {
  if (prevValue == nullptr) {
    *newValue = std::string(delta.size(), 'c');
    return UpdateStatus::UPDATED;
  } else {
    *prevSize = *prevSize - 1;
    std::string str_b = std::string(*prevSize, 'b');
    memcpy(prevValue, str_b.c_str(), str_b.size());
    return UpdateStatus::UPDATED_INPLACE;
  }
}

UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(
    char* prevValue, uint32_t* prevSize,
    Slice delta, std::string* newValue) {
  if (prevValue == nullptr) {
    *newValue = std::string(delta.size(), 'c');
    return UpdateStatus::UPDATED;
  } else {
    *prevSize = 1;
    std::string str_b = std::string(*prevSize, 'b');
    memcpy(prevValue, str_b.c_str(), str_b.size());
    return UpdateStatus::UPDATED_INPLACE;
  }
}

UpdateStatus DBTestBase::updateInPlaceLargerSize(
    char* prevValue, uint32_t* prevSize,
    Slice delta, std::string* newValue) {
  *newValue = std::string(delta.size(), 'c');
  return UpdateStatus::UPDATED;
}

UpdateStatus DBTestBase::updateInPlaceNoAction(
    char* prevValue, uint32_t* prevSize,
    Slice delta, std::string* newValue) {
  return UpdateStatus::UPDATE_FAILED;
}

// Utility method to test InplaceUpdate
void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
  ScopedArenaIterator iter;
  Arena arena;
  if (cf != 0) {
    iter.set(dbfull()->TEST_NewInternalIterator(&arena, handles_[cf]));
  } else {
    iter.set(dbfull()->TEST_NewInternalIterator(&arena));
  }
  iter->SeekToFirst();
  ASSERT_EQ(iter->status().ok(), true);
  int seq = numValues;
  while (iter->Valid()) {
    ParsedInternalKey ikey;
    ikey.sequence = -1;
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);

    // checks sequence number for updates
    ASSERT_EQ(ikey.sequence, (unsigned)seq--);
    iter->Next();
  }
  ASSERT_EQ(0, seq);
}

void DBTestBase::CopyFile(const std::string& source,
                          const std::string& destination, uint64_t size) {
  const EnvOptions soptions;
  unique_ptr<SequentialFile> srcfile;
  ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
  unique_ptr<WritableFile> destfile;
  ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));

  if (size == 0) {
    // default argument means copy everything
    ASSERT_OK(env_->GetFileSize(source, &size));
  }

  char buffer[4096];
  Slice slice;
  while (size > 0) {
    uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
    ASSERT_OK(srcfile->Read(one, &slice, buffer));
    ASSERT_OK(destfile->Append(slice));
    size -= slice.size();
  }
  ASSERT_OK(destfile->Close());
}

}  // namespace rocksdb
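For context on how these helpers are driven: tests in db_test.cc typically wrap their body in a do/while loop over ChangeOptions(), so the same assertions run under every option configuration. A hedged sketch of that pattern, reusing the hypothetical fixture from the earlier example (the test name and body are illustrative, not from this commit):

// Illustrative driver loop; ChangeOptions() destroys and reopens the DB with
// the next configuration and returns false once kEnd is reached.
TEST_F(DBCompactionTest, RunsUnderAllConfigs) {
  do {
    ASSERT_OK(Put("key", "value"));
    ASSERT_EQ("value", Get("key"));
  } while (ChangeOptions(kSkipPlainTable | kSkipHashCuckoo));
}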
util/db_test_util.h (new file, 628 lines)

@@ -0,0 +1,628 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <fcntl.h>
#include <inttypes.h>
#include <unistd.h>

#include <algorithm>
#include <set>
#include <string>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>

#include "db/db_impl.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/checkpoint.h"
#include "rocksdb/utilities/convenience.h"
#include "table/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain_table_factory.h"
#include "util/compression.h"
#include "util/db_test_util.h"
#include "util/hash_linklist_rep.h"
#include "util/mock_env.h"
#include "util/mutexlock.h"
#include "util/scoped_arena_iterator.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "util/xfunc.h"
#include "utilities/merge_operators.h"

namespace rocksdb {

static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}

namespace anon {
class AtomicCounter {
 public:
  explicit AtomicCounter(Env* env = NULL)
      : env_(env), cond_count_(&mu_), count_(0) {}

  void Increment() {
    MutexLock l(&mu_);
    count_++;
    cond_count_.SignalAll();
  }

  int Read() {
    MutexLock l(&mu_);
    return count_;
  }

  bool WaitFor(int count) {
    MutexLock l(&mu_);

    uint64_t start = env_->NowMicros();
    while (count_ < count) {
      uint64_t now = env_->NowMicros();
      cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000);
      if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) {
        return false;
      }
      if (count_ < count) {
        GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual";
      }
    }

    return true;
  }

  void Reset() {
    MutexLock l(&mu_);
    count_ = 0;
    cond_count_.SignalAll();
  }

 private:
  Env* env_;
  port::Mutex mu_;
  port::CondVar cond_count_;
  int count_;
};

struct OptionsOverride {
  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;

  // Used as a bit mask of individual enums in which to skip an XF test point
  int skip_policy = 0;
};

}  // namespace anon

static std::string Key(int i) {
  char buf[100];
  snprintf(buf, sizeof(buf), "key%06d", i);
  return std::string(buf);
}

// Special Env used to delay background operations
class SpecialEnv : public EnvWrapper {
 public:
  explicit SpecialEnv(Env* base);

  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) override {
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
          : env_(env),
            base_(std::move(base)) {
      }
      Status Append(const Slice& data) override {
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return Status::IOError("No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->Append(data);
        }
      }
      Status Close() override {
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
                                 &preallocation_size);
        return base_->Close();
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
          env_->SleepForMicroseconds(100000);
        }
        return base_->Sync();
      }
      void SetIOPriority(Env::IOPriority pri) override {
        base_->SetIOPriority(pri);
      }
    };
    class ManifestFile : public WritableFile {
     public:
      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) { }
      Status Append(const Slice& data) override {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
      uint64_t GetFileSize() override { return base_->GetFileSize(); }

     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
    };
    class WalFile : public WritableFile {
     public:
      WalFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {}
      Status Append(const Slice& data) override {
        if (env_->log_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          int slowdown =
              env_->log_write_slowdown_.load(std::memory_order_acquire);
          if (slowdown > 0) {
            env_->SleepForMicroseconds(slowdown);
          }
          return base_->Append(data);
        }
      }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        return base_->Sync();
      }

     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
    };

    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
      uint32_t random_number;
      {
        MutexLock l(&rnd_mutex_);
        random_number = rnd_.Uniform(100);
      }
      if (random_number < non_writeable_rate_.load()) {
        return Status::IOError("simulated random write error");
      }
    }

    new_writable_count_++;

    if (non_writable_count_.load() > 0) {
      non_writable_count_--;
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r, soptions);
    if (s.ok()) {
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new WalFile(this, std::move(*r)));
      }
    }
    return s;
  }

  Status NewRandomAccessFile(const std::string& f,
                             unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) override {
    class CountingFile : public RandomAccessFile {
     public:
      CountingFile(unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {
      }
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const override {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }

     private:
      unique_ptr<RandomAccessFile> target_;
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_));
    }
    return s;
  }

  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
                           const EnvOptions& soptions) override {
    class CountingFile : public SequentialFile {
     public:
      CountingFile(unique_ptr<SequentialFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {}
      virtual Status Read(size_t n, Slice* result, char* scratch) override {
        counter_->Increment();
        return target_->Read(n, result, scratch);
      }
      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }

     private:
      unique_ptr<SequentialFile> target_;
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewSequentialFile(f, r, soptions);
    if (s.ok() && count_sequential_reads_) {
      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
    }
    return s;
  }

  virtual void SleepForMicroseconds(int micros) override {
    sleep_counter_.Increment();
    if (no_sleep_) {
      addon_time_.fetch_add(micros);
    } else {
      target()->SleepForMicroseconds(micros);
    }
  }

  virtual Status GetCurrentTime(int64_t* unix_time) override {
    Status s = target()->GetCurrentTime(unix_time);
    if (s.ok()) {
      *unix_time += addon_time_.load();
    }
    return s;
  }

  virtual uint64_t NowNanos() override {
    return target()->NowNanos() + addon_time_.load() * 1000;
  }

  virtual uint64_t NowMicros() override {
    return target()->NowMicros() + addon_time_.load();
  }

  Random rnd_;
  port::Mutex rnd_mutex_;  // Lock to protect rnd_

  // sstable Sync() calls are blocked while this is set to true.
  std::atomic<bool> delay_sstable_sync_;

  // Drop writes on the floor while this is set to true.
  std::atomic<bool> drop_writes_;

  // Simulate no-space errors while this is set to true.
  std::atomic<bool> no_space_;

  // Simulate a non-writable file system while this is set to true.
  std::atomic<bool> non_writable_;

  // Force sync of manifest files to fail while this is set to true.
  std::atomic<bool> manifest_sync_error_;

  // Force writes to manifest files to fail while this is set to true.
  std::atomic<bool> manifest_write_error_;

  // Force writes to log files to fail while this is set to true.
  std::atomic<bool> log_write_error_;

  // Slow down every log write, in micro-seconds.
  std::atomic<int> log_write_slowdown_;

  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;

  bool count_sequential_reads_;
  anon::AtomicCounter sequential_read_counter_;

  anon::AtomicCounter sleep_counter_;

  std::atomic<int64_t> bytes_written_;

  std::atomic<int> sync_counter_;

  std::atomic<uint32_t> non_writeable_rate_;

  std::atomic<uint32_t> new_writable_count_;

  std::atomic<uint32_t> non_writable_count_;

  std::function<void()>* table_write_callback_;

  std::atomic<int64_t> addon_time_;
  bool no_sleep_;
};

class DBTestBase : public testing::Test {
 protected:
  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault = 0,
    kBlockBasedTableWithPrefixHashIndex = 1,
    kBlockBasedTableWithWholeKeyHashIndex = 2,
    kPlainTableFirstBytePrefix = 3,
    kPlainTableCappedPrefix = 4,
    kPlainTableAllBytesPrefix = 5,
    kVectorRep = 6,
    kHashLinkList = 7,
    kHashCuckoo = 8,
    kMergePut = 9,
    kFilter = 10,
    kFullFilter = 11,
    kUncompressed = 12,
    kNumLevel_3 = 13,
    kDBLogDir = 14,
    kWalDirAndMmapReads = 15,
    kManifestFileSize = 16,
    kCompactOnFlush = 17,
    kPerfOptions = 18,
    kDeletesFilterFirst = 19,
    kHashSkipList = 20,
    kUniversalCompaction = 21,
    kUniversalCompactionMultiLevel = 22,
    kCompressedBlockCache = 23,
    kInfiniteMaxOpenFiles = 24,
    kxxHashChecksum = 25,
    kFIFOCompaction = 26,
    kOptimizeFiltersForHits = 27,
    kRowCache = 28,
    kEnd = 29
  };
  int option_config_;

 public:
  std::string dbname_;
  std::string alternative_wal_dir_;
  MockEnv* mem_env_;
  SpecialEnv* env_;
  DB* db_;
  std::vector<ColumnFamilyHandle*> handles_;

  Options last_options_;

  // Skip some options, as they may not be applicable to a specific test.
  // To add more skip constants, use values 4, 8, 16, etc.
  enum OptionSkip {
    kNoSkip = 0,
    kSkipDeletesFilterFirst = 1,
    kSkipUniversalCompaction = 2,
    kSkipMergePut = 4,
    kSkipPlainTable = 8,
    kSkipHashIndex = 16,
    kSkipNoSeekToLast = 32,
    kSkipHashCuckoo = 64,
    kSkipFIFOCompaction = 128,
    kSkipMmapReads = 256,
  };

  explicit DBTestBase(const std::string path);

  ~DBTestBase();

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions(int skip_mask = kNoSkip);

  // Switch between different compaction styles (we have only 2 now).
  bool ChangeCompactOptions();

  // Switch between different filter policies.
  // Jump from kDefault to kFilter to kFullFilter.
  bool ChangeFilterOptions();

  // Return the current option configuration.
  Options CurrentOptions(
      const anon::OptionsOverride& options_override = anon::OptionsOverride());

  Options CurrentOptions(
      const Options& defaultOptions,
      const anon::OptionsOverride& options_override = anon::OptionsOverride());

  DBImpl* dbfull() {
    return reinterpret_cast<DBImpl*>(db_);
  }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const Options& options);

  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const Options& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<Options>& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const Options& options);

  Status TryReopenWithColumnFamilies(
      const std::vector<std::string>& cfs,
      const std::vector<Options>& options);

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const Options& options);

  void Reopen(const Options& options);

  void Close();

  void DestroyAndReopen(const Options& options);

  void Destroy(const Options& options);

  Status ReadOnlyReopen(const Options& options);

  Status TryReopen(const Options& options);

  Status Flush(int cf = 0);

  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());

  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions());

  Status Delete(const std::string& k);

  Status Delete(int cf, const std::string& k);

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);

  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr);

  uint64_t GetNumSnapshots();

  uint64_t GetTimeOldestSnapshots();

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents(int cf = 0);

  std::string AllEntriesFor(const Slice& user_key, int cf = 0);

  int NumSortedRuns(int cf = 0);

  uint64_t TotalSize(int cf = 0);

  int NumTableFilesAtLevel(int level, int cf = 0);

  uint64_t SizeAtLevel(int level);

  int TotalLiveFiles(int cf = 0);

  int TotalTableFiles(int cf = 0, int levels = -1);

  // Return spread of files per level
  std::string FilesPerLevel(int cf = 0);

  size_t CountFiles();

  size_t CountLiveFiles();

  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);

  void Compact(int cf, const Slice& start, const Slice& limit,
               uint32_t target_path_id);

  void Compact(int cf, const Slice& start, const Slice& limit);

  void Compact(const Slice& start, const Slice& limit);

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large,
                  int cf = 0);

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest,
                  int cf);

  void DumpFileCounts(const char* label);

  std::string DumpSSTableList();

  int GetSstFileCount(std::string path);

  // this will generate non-overlapping files since it keeps increasing key_idx
  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);

  void GenerateNewRandomFile(Random* rnd, bool nowait = false);

  std::string IterStatus(Iterator* iter);

  Options OptionsForLogIterTest();

  std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
      const SequenceNumber seq);

  std::string DummyString(size_t len, char c = 'a');

  void VerifyIterLast(std::string expected_key, int cf = 0);

  // Used to test InplaceUpdate

  // If the previous value is nullptr or delta is larger than the previous
  // value, sets newValue with delta.
  // If the previous value is not empty,
  // updates the previous value with a 'b' string of previous value size - 1.
  static UpdateStatus updateInPlaceSmallerSize(
      char* prevValue, uint32_t* prevSize,
      Slice delta, std::string* newValue);

  static UpdateStatus updateInPlaceSmallerVarintSize(
      char* prevValue, uint32_t* prevSize,
      Slice delta, std::string* newValue);

  static UpdateStatus updateInPlaceLargerSize(
      char* prevValue, uint32_t* prevSize,
      Slice delta, std::string* newValue);

  static UpdateStatus updateInPlaceNoAction(
      char* prevValue, uint32_t* prevSize,
      Slice delta, std::string* newValue);

  // Utility method to test InplaceUpdate
  void validateNumberOfEntries(int numValues, int cf = 0);

  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0);
};

}  // namespace rocksdb
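SpecialEnv exists so tests can inject faults without touching the real file system: the std::atomic flags above are read by the wrapper file classes on the I/O path. A brief sketch of how a test might use the no_space_ flag (the test body is hypothetical; the flag and the resulting "No space left on device" IOError come from the code above):

// Hypothetical fault-injection usage of SpecialEnv from a DBTestBase test.
TEST_F(DBCompactionTest, NoSpaceSketch) {
  Options options = CurrentOptions();  // CurrentOptions() wires options.env to env_
  Reopen(options);
  env_->no_space_.store(true, std::memory_order_release);
  // While the flag is set, sst writes triggered by a flush fail with
  // Status::IOError("No space left on device"), so the test can assert
  // on the error-handling path.
  env_->no_space_.store(false, std::memory_order_release);
}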