[CF] DB test to run on non-default column family

Summary:
This is a huge diff and it was hectic, but the idea is actually quite simple. Every operation (Put, Get, etc.) that DBTest used to run on the default column family is now forwarded to a non-default one ("pikachu"). The good news is that we had zero test failures! Column families look stable so far.
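To sketch the forwarding idea (a hypothetical helper, not the exact DBTest code): the fixture keeps one ColumnFamilyHandle* per open column family and every helper takes a cf index, so the same test body can target "default" or "pikachu":

#include <string>
#include <vector>
#include "rocksdb/db.h"

// Hypothetical DBTest-style fixture; handles_[0] would be "default",
// handles_[1] would be "pikachu". Names here are illustrative only.
struct CFTestHelper {
  rocksdb::DB* db_;
  std::vector<rocksdb::ColumnFamilyHandle*> handles_;

  rocksdb::Status Put(int cf, const rocksdb::Slice& k,
                      const rocksdb::Slice& v) {
    // Forward the write to whichever column family the test selects.
    return db_->Put(rocksdb::WriteOptions(), handles_[cf], k, v);
  }

  std::string Get(int cf, const std::string& k) {
    std::string result;
    rocksdb::Status s =
        db_->Get(rocksdb::ReadOptions(), handles_[cf], k, &result);
    if (s.IsNotFound()) return "NOT_FOUND";
    return s.ok() ? result : s.ToString();
  }
};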

One interesting test that I adapted for column families is MultiThreadedTest. I replaced every Put() with a WriteBatch that writes to all column families; the threads still run concurrently. Every Put in the write batch contains a unique_id. Instead of Get(), I do a MultiGet() across all column families with the same key. If atomicity holds, I expect to see the same unique_id in all column families.
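Roughly, the pattern looks like this (a minimal sketch of the check, not the actual MultiThreadedTest code; `handles` is assumed to hold one ColumnFamilyHandle* per column family):

#include <string>
#include <vector>
#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

// Writer side: one atomic batch puts the same unique_id under the same key
// in every column family.
void PutToAllColumnFamilies(
    rocksdb::DB* db, const std::vector<rocksdb::ColumnFamilyHandle*>& handles,
    const rocksdb::Slice& key, const rocksdb::Slice& unique_id) {
  rocksdb::WriteBatch batch;
  for (auto* h : handles) {
    batch.Put(h, key, unique_id);
  }
  db->Write(rocksdb::WriteOptions(), &batch);  // applied atomically
}

// Reader side: MultiGet the same key from every column family; if writes
// are atomic, all column families return the same unique_id.
bool SawConsistentValue(
    rocksdb::DB* db, const std::vector<rocksdb::ColumnFamilyHandle*>& handles,
    const std::string& key) {
  std::vector<rocksdb::Slice> keys(handles.size(), rocksdb::Slice(key));
  std::vector<std::string> values;
  db->MultiGet(rocksdb::ReadOptions(), handles, keys, &values);
  for (const auto& v : values) {
    if (v != values.front()) return false;
  }
  return true;
}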

Test Plan: This is a test!

Reviewers: dhruba, haobo, kailiu, sdong

CC: leveldb

Differential Revision: https://reviews.facebook.net/D16149
Igor Canadi 2014-02-07 14:47:16 -08:00
parent 39ae9f7988
commit c67d48c852
5 changed files with 1259 additions and 1033 deletions

db/column_family_test.cc

@@ -355,53 +355,6 @@ TEST(ColumnFamilyTest, FlushTest) {
   Close();
 }
 
-// This is the same as DBTest::ManualCompaction, but it does all
-// operations on non-default column family
-TEST(ColumnFamilyTest, ManualCompaction) {
-  // iter - 0 with 7 levels
-  // iter - 1 with 3 levels
-  int cf = 1;
-  for (int iter = 0; iter < 2; ++iter) {
-    column_family_options_.num_levels = (iter == 0) ? 3 : 7;
-    Destroy();
-    ASSERT_OK(Open({"default"}));
-    CreateColumnFamilies({"one"});
-    Close();
-    ASSERT_OK(Open({"default", "one"}));
-
-    MakeTables(cf, 3, "p", "q");
-    ASSERT_EQ("1,1,1", FilesPerLevel(cf));
-
-    // Compaction range falls before files
-    Compact(cf, "", "c");
-    ASSERT_EQ("1,1,1", FilesPerLevel(cf));
-
-    // Compaction range falls after files
-    Compact(cf, "r", "z");
-    ASSERT_EQ("1,1,1", FilesPerLevel(cf));
-
-    // Compaction range overlaps files
-    Compact(cf, "p1", "p9");
-    ASSERT_EQ("0,0,1", FilesPerLevel(cf));
-
-    // Populate a different range
-    MakeTables(cf, 3, "c", "e");
-    ASSERT_EQ("1,1,2", FilesPerLevel(cf));
-
-    // Compact just the new range
-    Compact(cf, "b", "f");
-    ASSERT_EQ("0,0,2", FilesPerLevel(cf));
-
-    // Compact all
-    MakeTables(cf, 1, "a", "z");
-    ASSERT_EQ("0,1,2", FilesPerLevel(cf));
-    Compact(cf, "", "zzz");
-    ASSERT_EQ("0,0,1", FilesPerLevel(cf));
-  }
-  Close();
-}
-
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {

db/db_filesnapshot.cc

@@ -74,7 +74,9 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
 
   // Make a set of all of the live *.sst files
   std::set<uint64_t> live;
-  default_cf_handle_->cfd()->current()->AddLiveFiles(&live);
+  for (auto cfd : *versions_->GetColumnFamilySet()) {
+    cfd->current()->AddLiveFiles(&live);
+  }
 
   ret.clear();
   ret.reserve(live.size() + 2);  //*.sst + CURRENT + MANIFEST

db/db_impl.cc

@@ -1681,15 +1681,21 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
   return manual.status;
 }
 
-Status DBImpl::TEST_CompactRange(int level,
-                                 const Slice* begin,
-                                 const Slice* end) {
-  auto default_cfd = default_cf_handle_->cfd();
+Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
+                                 const Slice* end,
+                                 ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
   int output_level =
-      (default_cfd->options()->compaction_style == kCompactionStyleUniversal)
+      (cfd->options()->compaction_style == kCompactionStyleUniversal)
           ? level
           : level + 1;
-  return RunManualCompaction(default_cfd, level, output_level, begin, end);
+  return RunManualCompaction(cfd, level, output_level, begin, end);
 }
 
 Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
@@ -1720,8 +1726,15 @@ Status DBImpl::TEST_FlushMemTable() {
   return FlushMemTable(default_cf_handle_->cfd(), FlushOptions());
 }
 
-Status DBImpl::TEST_WaitForFlushMemTable() {
-  return WaitForFlushMemTable(default_cf_handle_->cfd());
+Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+  return WaitForFlushMemTable(cfd);
 }
 
 Status DBImpl::TEST_WaitForCompact() {
@@ -2725,13 +2738,19 @@ ColumnFamilyHandle* DBImpl::DefaultColumnFamily() const {
   return default_cf_handle_;
 }
 
-Iterator* DBImpl::TEST_NewInternalIterator() {
+Iterator* DBImpl::TEST_NewInternalIterator(ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+
   mutex_.Lock();
-  SuperVersion* super_version =
-      default_cf_handle_->cfd()->GetSuperVersion()->Ref();
+  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
   mutex_.Unlock();
-  return NewInternalIterator(ReadOptions(), default_cf_handle_->cfd(),
-                             super_version);
+  return NewInternalIterator(ReadOptions(), cfd, super_version);
 }
 
 std::pair<Iterator*, Iterator*> DBImpl::GetTailingIteratorPair(
@@ -2773,9 +2792,17 @@ std::pair<Iterator*, Iterator*> DBImpl::GetTailingIteratorPair(
   return std::make_pair(mutable_iter, immutable_iter);
 }
 
-int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
+int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes(
+    ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
   MutexLock l(&mutex_);
-  return default_cf_handle_->cfd()->current()->MaxNextLevelOverlappingBytes();
+  return cfd->current()->MaxNextLevelOverlappingBytes();
 }
 
 Status DBImpl::Get(const ReadOptions& options,
@@ -2853,6 +2880,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,
     // Done
     RecordTick(options_.statistics.get(), MEMTABLE_HIT);
   } else {
+    // Done
     StopWatchNano from_files_timer(env_, false);
     StartPerfTimer(&from_files_timer);
@@ -3707,12 +3735,14 @@ void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
 }
 
 void DBImpl::TEST_GetFilesMetaData(
+    ColumnFamilyHandle* column_family,
     std::vector<std::vector<FileMetaData>>* metadata) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
   MutexLock l(&mutex_);
   metadata->resize(NumberLevels());
   for (int level = 0; level < NumberLevels(); level++) {
-    const std::vector<FileMetaData*>& files =
-        default_cf_handle_->cfd()->current()->files_[level];
+    const std::vector<FileMetaData*>& files = cfd->current()->files_[level];
     (*metadata)[level].clear();
     for (const auto& f : files) {

db/db_impl.h

@@ -135,15 +135,14 @@ class DBImpl : public DB {
   // Extra methods (for testing) that are not in the public DB interface
 
   // Compact any files in the named level that overlap [*begin, *end]
-  Status TEST_CompactRange(int level,
-                           const Slice* begin,
-                           const Slice* end);
+  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
+                           ColumnFamilyHandle* column_family = nullptr);
 
   // Force current memtable contents to be flushed.
   Status TEST_FlushMemTable();
 
   // Wait for memtable compaction
-  Status TEST_WaitForFlushMemTable();
+  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
 
   // Wait for any compaction
   Status TEST_WaitForCompact();
@@ -151,11 +150,13 @@ class DBImpl : public DB {
   // Return an internal iterator over the current state of the database.
   // The keys of this iterator are internal keys (see format.h).
   // The returned iterator should be deleted when no longer needed.
-  Iterator* TEST_NewInternalIterator();
+  Iterator* TEST_NewInternalIterator(ColumnFamilyHandle* column_family =
+                                         nullptr);
 
   // Return the maximum overlapping data (in bytes) at next level for any
   // file at a level >= 1.
-  int64_t TEST_MaxNextLevelOverlappingBytes();
+  int64_t TEST_MaxNextLevelOverlappingBytes(ColumnFamilyHandle* column_family =
+                                                nullptr);
 
   // Simulate a db crash, no elegant closing of database.
   void TEST_Destroy_DBImpl();
@@ -174,7 +175,8 @@ class DBImpl : public DB {
     default_interval_to_delete_obsolete_WAL_ = default_interval_to_delete_obsolete_WAL;
   }
 
-  void TEST_GetFilesMetaData(std::vector<std::vector<FileMetaData>>* metadata);
+  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
+                             std::vector<std::vector<FileMetaData>>* metadata);
 
   // needed for CleanupIteratorState
   struct DeletionState {
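For illustration, here is how a test might exercise the new column-family-aware signatures above (a hypothetical sketch; dbfull() and handles_ are assumed DBTest-style fixture helpers, not part of this diff):

// Hypothetical call sites; handles_[1] is assumed to be the "pikachu"
// column family opened by the test fixture.
DBImpl* dbi = dbfull();

// Compact level 0 of the non-default column family.
ASSERT_OK(dbi->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));

// Wait for its memtable flush; omitting the argument (nullptr default)
// falls back to the default column family, so old call sites keep working.
ASSERT_OK(dbi->TEST_WaitForFlushMemTable(handles_[1]));

// Per-column-family overlap statistics.
int64_t overlap = dbi->TEST_MaxNextLevelOverlappingBytes(handles_[1]);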

db/db_test.cc: file diff suppressed because it is too large