// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include <algorithm>
#include <map>
#include <string>
#include <tuple>

#include "db/compaction_job.h"
#include "db/column_family.h"
#include "db/version_set.h"
#include "db/writebuffer.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "table/mock_table.h"
#include "util/file_reader_writer.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "utilities/merge_operators.h"

namespace rocksdb {

namespace {
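
// A freshly constructed CompactionJob is expected to leave its stats at
// their defaults: every counter is zero, and is_manual_compaction is already
// true because these tests always build a manual compaction.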
void VerifyInitializationOfCompactionJobStats(
    const CompactionJobStats& compaction_job_stats) {
#if !defined(IOS_CROSS_COMPILE)
  ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);

  ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_output_files, 0U);

  ASSERT_EQ(compaction_job_stats.is_manual_compaction, true);

  ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
  ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);

  ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);

  ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);
#endif  // !defined(IOS_CROSS_COMPILE)
}

}  // namespace

// TODO(icanadi) Make it simpler once we mock out VersionSet
class CompactionJobTest : public testing::Test {
 public:
  CompactionJobTest()
      : env_(Env::Default()),
        dbname_(test::TmpDir() + "/compaction_job_test"),
        mutable_cf_options_(Options(), ImmutableCFOptions(Options())),
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_(db_options_.db_write_buffer_size),
        versions_(new VersionSet(dbname_, &db_options_, env_options_,
                                 table_cache_.get(), &write_buffer_,
                                 &write_controller_)),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()) {
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
  }
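
  // Builds the table file path for `file_number` the same way the DB would,
  // so mock tables land exactly where the VersionSet expects to find them.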
  std::string GenerateFileName(uint64_t file_number) {
    FileMetaData meta;
    std::vector<DbPath> db_paths;
    db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
    meta.fd = FileDescriptor(file_number, 0, 0);
    return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
  }
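
  // An internal key is the user key plus an 8-byte trailer packing the
  // sequence number and ValueType; KeyStr produces the exact on-disk key
  // bytes, e.g. KeyStr("a", 5U, kTypeValue) for key "a" written at seq 5.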
  std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
                     const ValueType t) {
    return InternalKey(user_key, seq_num, t).Encode().ToString();
  }
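
  // Writes `contents` out as a mock table file, then registers it at `level`
  // via a VersionEdit recording the file's key range and sequence-number
  // bounds, which is all the compaction needs to treat it as a real SST.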
  void AddMockFile(const mock::MockFileContents& contents, int level = 0) {
    assert(contents.size() > 0);

    bool first_key = true;
    std::string smallest, largest;
    InternalKey smallest_key, largest_key;
    SequenceNumber smallest_seqno = kMaxSequenceNumber;
    SequenceNumber largest_seqno = 0;
    for (auto kv : contents) {
      ParsedInternalKey key;
      std::string skey;
      std::string value;
      std::tie(skey, value) = kv;
      ParseInternalKey(skey, &key);

      smallest_seqno = std::min(smallest_seqno, key.sequence);
      largest_seqno = std::max(largest_seqno, key.sequence);

      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
        smallest.assign(key.user_key.data(), key.user_key.size());
        smallest_key.DecodeFrom(skey);
      }
      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
        largest.assign(key.user_key.data(), key.user_key.size());
        largest_key.DecodeFrom(skey);
      }

      first_key = false;
    }

    uint64_t file_number = versions_->NewFileNumber();
    EXPECT_OK(mock_table_factory_->CreateMockTable(
        env_, GenerateFileName(file_number), std::move(contents)));

    VersionEdit edit;
    edit.AddFile(level, file_number, 0, 10, smallest_key, largest_key,
                 smallest_seqno, largest_seqno, false);

    mutex_.Lock();
    versions_->LogAndApply(versions_->GetColumnFamilySet()->GetDefault(),
                           mutable_cf_options_, &edit, &mutex_);
    mutex_.Unlock();
  }

  void SetLastSequence(const SequenceNumber sequence_number) {
    versions_->SetLastSequence(sequence_number + 1);
  }

  // returns expected result after compaction
  mock::MockFileContents CreateTwoFiles(bool gen_corrupted_keys) {
    mock::MockFileContents expected_results;
    const int kKeysPerFile = 10000;
    const int kCorruptKeysPerFile = 200;
    const int kMatchingKeys = kKeysPerFile / 2;
    SequenceNumber sequence_number = 0;

    auto corrupt_id = [&](int id) {
      return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
    };

    for (int i = 0; i < 2; ++i) {
      mock::MockFileContents contents;
      for (int k = 0; k < kKeysPerFile; ++k) {
        auto key = ToString(i * kMatchingKeys + k);
        auto value = ToString(i * kKeysPerFile + k);
        InternalKey internal_key(key, ++sequence_number, kTypeValue);
        // This is how the key will look once it's written to the bottommost
        // file
        InternalKey bottommost_internal_key(key, 0, kTypeValue);
        if (corrupt_id(k)) {
          test::CorruptKeyType(&internal_key);
          test::CorruptKeyType(&bottommost_internal_key);
        }
        contents.insert({ internal_key.Encode().ToString(), value });
        // A key is expected to survive if it comes from the newer file
        // (i == 1), has no newer duplicate (k < kMatchingKeys), or its newer
        // duplicate is corrupted and therefore cannot shadow it.
        if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
          expected_results.insert(
              { bottommost_internal_key.Encode().ToString(), value });
        }
      }

      AddMockFile(contents);
    }

    SetLastSequence(sequence_number);

    return expected_results;
  }
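
  // Bootstraps a fresh DB by hand: writes MANIFEST-000001 containing a blank
  // VersionEdit, points CURRENT at it, then recovers a VersionSet whose
  // default column family uses the mock table factory (and, optionally, the
  // given merge operator).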
  void NewDB(std::shared_ptr<MergeOperator> merge_operator = nullptr) {
    VersionEdit new_db;
    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    const std::string manifest = DescriptorFileName(dbname_, 1);
    unique_ptr<WritableFile> file;
    Status s = env_->NewWritableFile(
        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
    ASSERT_OK(s);
    unique_ptr<WritableFileWriter> file_writer(
        new WritableFileWriter(std::move(file), env_options_));
    {
      log::Writer log(std::move(file_writer));
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1, nullptr);

    std::vector<ColumnFamilyDescriptor> column_families;
    cf_options_.table_factory = mock_table_factory_;
    cf_options_.merge_operator = merge_operator;
    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);

    EXPECT_OK(versions_->Recover(column_families, false));
    cfd_ = versions_->GetColumnFamilySet()->GetDefault();
  }
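
  // Runs a manual compaction of `input_files` (one vector of files per
  // level, starting at level 0) into a single level-1 output file, then
  // checks the job's stats and the output table against `expected_results`.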
  void RunCompaction(const std::vector<std::vector<FileMetaData*>>& input_files,
                     const mock::MockFileContents& expected_results) {
    auto cfd = versions_->GetColumnFamilySet()->GetDefault();

    size_t num_input_files = 0;
    std::vector<CompactionInputFiles> compaction_input_files;
    for (size_t level = 0; level < input_files.size(); level++) {
      auto level_files = input_files[level];
      CompactionInputFiles compaction_level;
      compaction_level.level = static_cast<int>(level);
      compaction_level.files.insert(compaction_level.files.end(),
                                    level_files.begin(), level_files.end());
      compaction_input_files.push_back(compaction_level);
      num_input_files += level_files.size();
    }

    Compaction compaction(cfd->current()->storage_info(),
                          *cfd->GetLatestMutableCFOptions(),
                          compaction_input_files, 1, 1024 * 1024, 10, 0,
                          kNoCompression, {}, true);
    compaction.SetInputVersion(cfd->current());

    LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
    mutex_.Lock();
    EventLogger event_logger(db_options_.info_log.get());
    CompactionJob compaction_job(0, &compaction, db_options_, env_options_,
                                 versions_.get(), &shutting_down_, &log_buffer,
                                 nullptr, nullptr, nullptr, {}, table_cache_,
                                 &event_logger, false, false, dbname_,
                                 &compaction_job_stats_);

    VerifyInitializationOfCompactionJobStats(compaction_job_stats_);

    compaction_job.Prepare();
    mutex_.Unlock();
    Status s = compaction_job.Run();
    ASSERT_OK(s);
    mutex_.Lock();
    ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions(),
                                     &mutex_));
    mutex_.Unlock();

    ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
    ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
    ASSERT_EQ(compaction_job_stats_.num_output_files, 1U);
    mock_table_factory_->AssertLatestFile(expected_results);
  }

  Env* env_;
  std::string dbname_;
  EnvOptions env_options_;
  MutableCFOptions mutable_cf_options_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  DBOptions db_options_;
  ColumnFamilyOptions cf_options_;
  WriteBuffer write_buffer_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
  CompactionJobStats compaction_job_stats_;
  ColumnFamilyData* cfd_;
};
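
// Compacts two overlapping level-0 files; nothing exists below the output
// level, so the result should match the bottommost-format contents
// (sequence numbers zeroed) that CreateTwoFiles() computed.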
TEST_F(CompactionJobTest, Simple) {
  NewDB();

  auto expected_results = CreateTwoFiles(false);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto files = cfd->current()->storage_info()->LevelFiles(0);
  ASSERT_EQ(2U, files.size());
  RunCompaction({ files }, expected_results);
}
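
// CreateTwoFiles(true) corrupts the key type of kCorruptKeysPerFile (200)
// keys in each of the two input files, so the job should count 400 corrupt
// keys in total.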
TEST_F(CompactionJobTest, SimpleCorrupted) {
  NewDB();

  auto expected_results = CreateTwoFiles(true);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto files = cfd->current()->storage_info()->LevelFiles(0);
  RunCompaction({ files }, expected_results);
  ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U);
}
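
// The deletion of "c" at seq 4 shadows the value at seq 3, and in a
// bottommost compaction both can be dropped; only "b" survives, with its
// sequence number zeroed.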
TEST_F(CompactionJobTest, SimpleDeletion) {
  NewDB();

  mock::MockFileContents file1 = {
    { KeyStr("c", 4U, kTypeDeletion), "" },
    { KeyStr("c", 3U, kTypeValue), "val" }
  };
  AddMockFile(file1);

  mock::MockFileContents file2 = {
    { KeyStr("b", 2U, kTypeValue), "val" },
    { KeyStr("b", 1U, kTypeValue), "val" }
  };
  AddMockFile(file2);

  mock::MockFileContents expected_results = {
    { KeyStr("b", 0U, kTypeValue), "val" }
  };

  SetLastSequence(4U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({ files }, expected_results);
}
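
// Newer values for "a" (seq 3) and "b" (seq 4) overwrite the older ones, so
// only the newest version of each key reaches the output, rewritten with
// sequence number 0.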
TEST_F(CompactionJobTest, SimpleOverwrite) {
  NewDB();

  mock::MockFileContents file1 = {
    { KeyStr("a", 3U, kTypeValue), "val2" },
    { KeyStr("b", 4U, kTypeValue), "val3" },
  };
  AddMockFile(file1);

  mock::MockFileContents file2 = {
    { KeyStr("a", 1U, kTypeValue), "val" },
    { KeyStr("b", 2U, kTypeValue), "val" }
  };
  AddMockFile(file2);

  mock::MockFileContents expected_results = {
    { KeyStr("a", 0U, kTypeValue), "val2" },
    { KeyStr("b", 0U, kTypeValue), "val3" }
  };

  SetLastSequence(4U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({ files }, expected_results);
}
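
// L0 and L1 are compacted while older versions of "a" and "b" still live on
// L2, so the output is not the last level: the shadowed L1 entries are
// dropped, but the surviving keys must keep their sequence numbers.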
TEST_F(CompactionJobTest, SimpleNonLastLevel) {
  NewDB();

  mock::MockFileContents file1 = {
    { KeyStr("a", 5U, kTypeValue), "val2" },
    { KeyStr("b", 6U, kTypeValue), "val3" },
  };
  AddMockFile(file1);

  mock::MockFileContents file2 = {
    { KeyStr("a", 3U, kTypeValue), "val" },
    { KeyStr("b", 4U, kTypeValue), "val" }
  };
  AddMockFile(file2, 1);

  mock::MockFileContents file3 = {
    { KeyStr("a", 1U, kTypeValue), "val" },
    { KeyStr("b", 2U, kTypeValue), "val" }
  };
  AddMockFile(file3, 2);

  // Because level 1 is not the last level, the sequence numbers of a and b
  // cannot be set to 0
  mock::MockFileContents expected_results = {
    { KeyStr("a", 5U, kTypeValue), "val2" },
    { KeyStr("b", 6U, kTypeValue), "val3" }
  };

  SetLastSequence(6U);
  auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
  auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
  RunCompaction({ lvl0_files, lvl1_files }, expected_results);
}
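
// Each key's merge operands sit on top of a kTypeValue base, so the operator
// can fully resolve them and the output contains plain, seqno-zeroed values.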
TEST_F(CompactionJobTest, SimpleMerge) {
  auto merge_op = MergeOperators::CreateStringAppendOperator();
  NewDB(merge_op);

  mock::MockFileContents file1 = {
    { KeyStr("a", 5U, kTypeMerge), "5" },
    { KeyStr("a", 4U, kTypeMerge), "4" },
    { KeyStr("a", 3U, kTypeValue), "3" },
  };
  AddMockFile(file1);

  mock::MockFileContents file2 = {
    { KeyStr("b", 2U, kTypeMerge), "2" },
    { KeyStr("b", 1U, kTypeValue), "1" }
  };
  AddMockFile(file2);

  mock::MockFileContents expected_results = {
    { KeyStr("a", 0U, kTypeValue), "3,4,5" },
    { KeyStr("b", 0U, kTypeValue), "1,2" }
  };

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({ files }, expected_results);
}
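
// With no base values and an operator that does not support partial merge,
// a merge can only resolve when the compaction has provably seen the key's
// whole history. "a" qualifies: the input moves on to the next user key at
// the bottom level. "b" is the last key in the input, so its history might
// continue in files outside this compaction and its operands are passed
// through unchanged, keeping their original sequence numbers.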
TEST_F(CompactionJobTest, NonAssocMerge) {
  auto merge_op = MergeOperators::CreateStringAppendTESTOperator();
  NewDB(merge_op);

  mock::MockFileContents file1 = {
    { KeyStr("a", 5U, kTypeMerge), "5" },
    { KeyStr("a", 4U, kTypeMerge), "4" },
    { KeyStr("a", 3U, kTypeMerge), "3" },
  };
  AddMockFile(file1);

  mock::MockFileContents file2 = {
    { KeyStr("b", 2U, kTypeMerge), "2" },
    { KeyStr("b", 1U, kTypeMerge), "1" }
  };
  AddMockFile(file2);

  mock::MockFileContents expected_results = {
    { KeyStr("a", 0U, kTypeValue), "3,4,5" },
    { KeyStr("b", 2U, kTypeMerge), "2" },
    { KeyStr("b", 1U, kTypeMerge), "1" }
  };

  SetLastSequence(5U);
  auto files = cfd_->current()->storage_info()->LevelFiles(0);
  RunCompaction({ files }, expected_results);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}