// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include <map>
#include <string>

#include "db/flush_job.h"
#include "db/column_family.h"
#include "db/version_set.h"
#include "db/writebuffer.h"
#include "rocksdb/cache.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "table/mock_table.h"

namespace rocksdb {

// TODO(icanadi) Mock out everything else:
// 1. VersionSet
// 2. Memtable
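//
// The fixture builds a real VersionSet on top of a freshly created manifest
// (see NewDB() below), but swaps in mock::MockTableFactory so that flushed
// table contents are kept in memory where the tests can inspect them.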
class FlushJobTest {
 public:
  FlushJobTest()
      : env_(Env::Default()),
        dbname_(test::TmpDir() + "/flush_job_test"),
        table_cache_(NewLRUCache(50000, 16, 8)),
        write_buffer_(db_options_.db_write_buffer_size),
        versions_(new VersionSet(dbname_, &db_options_, env_options_,
                                 table_cache_.get(), &write_buffer_,
                                 &write_controller_)),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()) {
    ASSERT_OK(env_->CreateDirIfMissing(dbname_));
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
    // TODO(icanadi) Remove this once we mock out VersionSet
    NewDB();
    std::vector<ColumnFamilyDescriptor> column_families;
    cf_options_.table_factory = mock_table_factory_;
    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);

    ASSERT_OK(versions_->Recover(column_families, false));
  }
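
  // Writes an empty VersionEdit to a brand-new MANIFEST (descriptor file 1)
  // and points the CURRENT file at it, so that VersionSet::Recover() in the
  // constructor has a valid, empty database to open.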
  void NewDB() {
    VersionEdit new_db;
    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    const std::string manifest = DescriptorFileName(dbname_, 1);
    unique_ptr<WritableFile> file;
    Status s = env_->NewWritableFile(
        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
    ASSERT_OK(s);
    {
      log::Writer log(std::move(file));
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1, nullptr);
  }
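
  // Shared state for the tests below; FlushJob borrows most of these by
  // pointer or reference, so they must outlive each FlushJob instance.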
  Env* env_;
  std::string dbname_;
  EnvOptions env_options_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  DBOptions db_options_;
  WriteBuffer write_buffer_;
  ColumnFamilyOptions cf_options_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
};
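
// With no immutable memtables queued for flush, FlushJob::Run() should
// succeed without writing a table file.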
TEST(FlushJobTest, Empty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  EventLogger event_logger(db_options_.info_log.get());
  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     env_options_, versions_.get(), &mutex_, &shutting_down_,
                     SequenceNumber(), &job_context, nullptr, nullptr, nullptr,
                     kNoCompression, nullptr, &event_logger);
  ASSERT_OK(flush_job.Run());
  job_context.Clean();
}
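
// Fills a memtable, moves it to the immutable list, flushes it, and checks
// that the mock table factory saw exactly one file containing exactly the
// inserted keys (tracked in their encoded InternalKey form, since that is
// what ends up in the table).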
TEST(FlushJobTest, NonEmpty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions());
  new_mem->Ref();
  std::map<std::string, std::string> inserted_keys;
  for (int i = 1; i < 10000; ++i) {
    std::string key(ToString(i));
    std::string value("value" + ToString(i));
    new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
    InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
    inserted_keys.insert({internal_key.Encode().ToString(), value});
  }
  cfd->imm()->Add(new_mem);

  EventLogger event_logger(db_options_.info_log.get());
  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     env_options_, versions_.get(), &mutex_, &shutting_down_,
                     SequenceNumber(), &job_context, nullptr, nullptr, nullptr,
                     kNoCompression, nullptr, &event_logger);
  mutex_.Lock();
  ASSERT_OK(flush_job.Run());
  mutex_.Unlock();
  mock_table_factory_->AssertSingleFile(inserted_keys);
  job_context.Clean();
}
}  // namespace rocksdb

int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); }