// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <algorithm>
#include <string>
#include <vector>

#include "db/db_impl.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "util/coding.h"
#include "utilities/merge_operators.h"

namespace rocksdb {

namespace {
std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}
}  // anonymous namespace

// Wraps an Env and counts how many times NewWritableFile() is called, so
// tests can check how many new files an operation creates.
class EnvCounter : public EnvWrapper {
 public:
  explicit EnvCounter(Env* base)
      : EnvWrapper(base), num_new_writable_file_(0) {}
  int GetNumberOfNewWritableFileCalls() {
    return num_new_writable_file_;
  }
  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) override {
    ++num_new_writable_file_;
    return EnvWrapper::NewWritableFile(f, r, soptions);
  }

 private:
  int num_new_writable_file_;
};

class ColumnFamilyTest : public testing::Test {
 public:
  ColumnFamilyTest() : rnd_(139) {
    env_ = new EnvCounter(Env::Default());
    dbname_ = test::TmpDir() + "/column_family_test";
    db_options_.create_if_missing = true;
    db_options_.env = env_;
    DestroyDB(dbname_, Options(db_options_, column_family_options_));
  }

  ~ColumnFamilyTest() {
    delete env_;
  }

  void Close() {
    for (auto h : handles_) {
      delete h;
    }
    handles_.clear();
    names_.clear();
    delete db_;
    db_ = nullptr;
  }
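
  // Attempts to open the DB with the given column family names. When
  // `options` is empty, every family uses column_family_options_;
  // otherwise `options` must supply one entry per name in `cf`.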
  Status TryOpen(std::vector<std::string> cf,
                 std::vector<ColumnFamilyOptions> options = {}) {
    std::vector<ColumnFamilyDescriptor> column_families;
    names_.clear();
    for (size_t i = 0; i < cf.size(); ++i) {
      column_families.push_back(ColumnFamilyDescriptor(
          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
      names_.push_back(cf[i]);
    }
    return DB::Open(db_options_, dbname_, column_families, &handles_, &db_);
  }

  Status OpenReadOnly(std::vector<std::string> cf,
                      std::vector<ColumnFamilyOptions> options = {}) {
    std::vector<ColumnFamilyDescriptor> column_families;
    names_.clear();
    for (size_t i = 0; i < cf.size(); ++i) {
      column_families.push_back(ColumnFamilyDescriptor(
          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
      names_.push_back(cf[i]);
    }
    return DB::OpenForReadOnly(db_options_, dbname_, column_families,
                               &handles_, &db_);
  }

  void AssertOpenReadOnly(std::vector<std::string> cf,
                          std::vector<ColumnFamilyOptions> options = {}) {
    ASSERT_OK(OpenReadOnly(cf, options));
  }

  void Open(std::vector<std::string> cf,
            std::vector<ColumnFamilyOptions> options = {}) {
    ASSERT_OK(TryOpen(cf, options));
  }

  void Open() {
    Open({"default"});
  }

  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
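
  // Returns an integer-valued DB property (e.g.
  // "rocksdb.num-immutable-mem-table") for the given column family.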
  int GetProperty(int cf, std::string property) {
    std::string value;
    EXPECT_TRUE(dbfull()->GetProperty(handles_[cf], property, &value));
#ifndef CYGWIN
    return std::stoi(value);
#else
    return std::strtol(value.c_str(), 0 /* off */, 10 /* base */);
#endif
  }

  void Destroy() {
    for (auto h : handles_) {
      delete h;
    }
    handles_.clear();
    names_.clear();
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_)));
  }

  void CreateColumnFamilies(
      const std::vector<std::string>& cfs,
      const std::vector<ColumnFamilyOptions> options = {}) {
    int cfi = static_cast<int>(handles_.size());
    handles_.resize(cfi + cfs.size());
    names_.resize(cfi + cfs.size());
    for (size_t i = 0; i < cfs.size(); ++i) {
      ASSERT_OK(db_->CreateColumnFamily(
          options.size() == 0 ? column_family_options_ : options[i], cfs[i],
          &handles_[cfi]));
      names_[cfi] = cfs[i];
      cfi++;
    }
  }
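
  // Closes the DB and reopens all column families that have not been
  // dropped, optionally with new per-family options.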
  void Reopen(const std::vector<ColumnFamilyOptions> options = {}) {
    std::vector<std::string> names;
    for (auto name : names_) {
      if (name != "") {
        names.push_back(name);
      }
    }
    Close();
    assert(options.size() == 0 || names.size() == options.size());
    Open(names, options);
  }

  void CreateColumnFamiliesAndReopen(const std::vector<std::string>& cfs) {
    CreateColumnFamilies(cfs);
    Reopen();
  }

  void DropColumnFamilies(const std::vector<int>& cfs) {
    for (auto cf : cfs) {
      ASSERT_OK(db_->DropColumnFamily(handles_[cf]));
      delete handles_[cf];
      handles_[cf] = nullptr;
      names_[cf] = "";
    }
  }

  void PutRandomData(int cf, int num, int key_value_size) {
    for (int i = 0; i < num; ++i) {
      // 10 bytes for key, rest is value
      ASSERT_OK(Put(cf, test::RandomKey(&rnd_, 10),
                    RandomString(&rnd_, key_value_size - 10)));
    }
  }

  void WaitForFlush(int cf) {
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
  }

  void WaitForCompaction() { ASSERT_OK(dbfull()->TEST_WaitForCompact()); }

  Status Put(int cf, const std::string& key, const std::string& value) {
    return db_->Put(WriteOptions(), handles_[cf], Slice(key), Slice(value));
  }
  Status Merge(int cf, const std::string& key, const std::string& value) {
    return db_->Merge(WriteOptions(), handles_[cf], Slice(key), Slice(value));
  }
  Status Flush(int cf) {
    return db_->Flush(FlushOptions(), handles_[cf]);
  }

  std::string Get(int cf, const std::string& key) {
    ReadOptions options;
    options.verify_checksums = true;
    std::string result;
    Status s = db_->Get(options, handles_[cf], Slice(key), &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  void CompactAll(int cf) {
    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), handles_[cf], nullptr,
                                nullptr));
  }

  void Compact(int cf, const Slice& start, const Slice& limit) {
    ASSERT_OK(
        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
  }

  int NumTableFilesAtLevel(int level, int cf) {
    return GetProperty(cf, "rocksdb.num-files-at-level" + ToString(level));
  }

  // Return spread of files per level
  std::string FilesPerLevel(int cf) {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < dbfull()->NumberLevels(handles_[cf]); level++) {
      int f = NumTableFilesAtLevel(level, cf);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = static_cast<int>(result.size());
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  int CountLiveFiles() {
    std::vector<LiveFileMetaData> metadata;
    db_->GetLiveFilesMetaData(&metadata);
    return static_cast<int>(metadata.size());
  }

  // Do n memtable flushes, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int cf, int n, const std::string& small,
                  const std::string& large) {
    for (int i = 0; i < n; i++) {
      ASSERT_OK(Put(cf, small, "begin"));
      ASSERT_OK(Put(cf, large, "end"));
      ASSERT_OK(db_->Flush(FlushOptions(), handles_[cf]));
    }
  }

  int CountLiveLogFiles() {
    int micros_wait_for_log_deletion = 20000;
    env_->SleepForMicroseconds(micros_wait_for_log_deletion);
    int ret = 0;
    VectorLogPtr wal_files;
    Status s;
    // GetSortedWalFiles is a flaky function -- it lists all the wal_dir
    // children and only later checks for their existence. If some of the log
    // files don't exist anymore, it reports an error. It does all of this
    // without the DB mutex held, so if a background process deletes a log
    // file while the function executes, it returns an error. We retry the
    // function up to 10 times to keep that race from failing the test.
    for (int retries = 0; retries < 10; ++retries) {
      wal_files.clear();
      s = db_->GetSortedWalFiles(wal_files);
      if (s.ok()) {
        break;
      }
    }
    EXPECT_OK(s);
    for (const auto& wal : wal_files) {
      if (wal->Type() == kAliveLogFile) {
        ++ret;
      }
    }
    return ret;
  }

  void AssertNumberOfImmutableMemtables(std::vector<int> num_per_cf) {
    assert(num_per_cf.size() == handles_.size());

    for (size_t i = 0; i < num_per_cf.size(); ++i) {
      ASSERT_EQ(num_per_cf[i],
                GetProperty(static_cast<int>(i),
                            "rocksdb.num-immutable-mem-table"));
    }
  }

  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0) {
    const EnvOptions soptions;
    unique_ptr<SequentialFile> srcfile;
    ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
    unique_ptr<WritableFile> destfile;
    ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));

    if (size == 0) {
      // default argument means copy everything
      ASSERT_OK(env_->GetFileSize(source, &size));
    }

    char buffer[4096];
    Slice slice;
    while (size > 0) {
      uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
      ASSERT_OK(srcfile->Read(one, &slice, buffer));
      ASSERT_OK(destfile->Append(slice));
      size -= slice.size();
    }
    ASSERT_OK(destfile->Close());
  }

  std::vector<ColumnFamilyHandle*> handles_;
  std::vector<std::string> names_;
  ColumnFamilyOptions column_family_options_;
  DBOptions db_options_;
  std::string dbname_;
  DB* db_ = nullptr;
  EnvCounter* env_;
  Random rnd_;
};
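
// A typical interaction with the harness above (a sketch only -- the tests
// below vary the options and the set of column families):
//
//   Open();
//   CreateColumnFamiliesAndReopen({"one", "two"});
//   ASSERT_OK(Put(1, "key", "value"));
//   ASSERT_EQ("value", Get(1, "key"));
//   Close();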

class DumbLogger : public Logger {
 public:
  using Logger::Logv;
  virtual void Logv(const char* format, va_list ap) override {}
  virtual size_t GetLogFileSize() const override { return 0; }
};

TEST_F(ColumnFamilyTest, DontReuseColumnFamilyID) {
  for (int iter = 0; iter < 3; ++iter) {
    Open();
    CreateColumnFamilies({"one", "two", "three"});
    for (size_t i = 0; i < handles_.size(); ++i) {
      auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[i]);
      ASSERT_EQ(i, cfh->GetID());
    }
    if (iter == 1) {
      Reopen();
    }
    DropColumnFamilies({3});
    Reopen();
    if (iter == 2) {
      // this tests if max_column_family is correctly persisted with
      // WriteSnapshot()
      Reopen();
    }
    CreateColumnFamilies({"three2"});
    // ID 3 that was used for dropped column family "three" should not be
    // reused
    auto cfh3 = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[3]);
    ASSERT_EQ(4U, cfh3->GetID());
    Close();
    Destroy();
  }
}
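
// Creates and drops column families and verifies that reads, reopens and
// ListColumnFamilies() all observe the drops.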
TEST_F(ColumnFamilyTest, AddDrop) {
  Open();
  CreateColumnFamilies({"one", "two", "three"});
  ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
  ASSERT_EQ("NOT_FOUND", Get(2, "fodor"));
  DropColumnFamilies({2});
  ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
  CreateColumnFamilies({"four"});
  ASSERT_EQ("NOT_FOUND", Get(3, "fodor"));
  ASSERT_OK(Put(1, "fodor", "mirko"));
  ASSERT_EQ("mirko", Get(1, "fodor"));
  ASSERT_EQ("NOT_FOUND", Get(3, "fodor"));
  Close();
  ASSERT_TRUE(TryOpen({"default"}).IsInvalidArgument());
  Open({"default", "one", "three", "four"});
  DropColumnFamilies({1});
  Reopen();
  Close();

  std::vector<std::string> families;
  ASSERT_OK(DB::ListColumnFamilies(db_options_, dbname_, &families));
  sort(families.begin(), families.end());
  ASSERT_TRUE(families ==
              std::vector<std::string>({"default", "four", "three"}));
}

TEST_F(ColumnFamilyTest, DropTest) {
  // first iteration - don't reopen DB before dropping
  // second iteration - reopen DB before dropping
  for (int iter = 0; iter < 2; ++iter) {
    Open({"default"});
    CreateColumnFamiliesAndReopen({"pikachu"});
    for (int i = 0; i < 100; ++i) {
      ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i)));
    }
    ASSERT_OK(Flush(1));

    if (iter == 1) {
      Reopen();
    }
    ASSERT_EQ("bar1", Get(1, "1"));

    ASSERT_EQ(CountLiveFiles(), 1);
    DropColumnFamilies({1});
    // make sure that all files are deleted when we drop the column family
    ASSERT_EQ(CountLiveFiles(), 0);
    Destroy();
  }
}
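
// A WriteBatch referencing a dropped column family must fail with
// InvalidArgument unless ignore_missing_column_families is set.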
TEST_F(ColumnFamilyTest, WriteBatchFailure) {
  Open();
  CreateColumnFamiliesAndReopen({"one", "two"});
  WriteBatch batch;
  batch.Put(handles_[0], Slice("existing"), Slice("column-family"));
  batch.Put(handles_[1], Slice("non-existing"), Slice("column-family"));
  ASSERT_OK(db_->Write(WriteOptions(), &batch));
  DropColumnFamilies({1});
  WriteOptions woptions_ignore_missing_cf;
  woptions_ignore_missing_cf.ignore_missing_column_families = true;
  batch.Put(handles_[0], Slice("still here"), Slice("column-family"));
  ASSERT_OK(db_->Write(woptions_ignore_missing_cf, &batch));
  ASSERT_EQ("column-family", Get(0, "still here"));
  Status s = db_->Write(WriteOptions(), &batch);
  ASSERT_TRUE(s.IsInvalidArgument());
  Close();
}

TEST_F(ColumnFamilyTest, ReadWrite) {
  Open();
  CreateColumnFamiliesAndReopen({"one", "two"});
  ASSERT_OK(Put(0, "foo", "v1"));
  ASSERT_OK(Put(0, "bar", "v2"));
  ASSERT_OK(Put(1, "mirko", "v3"));
  ASSERT_OK(Put(0, "foo", "v2"));
  ASSERT_OK(Put(2, "fodor", "v5"));

  for (int iter = 0; iter <= 3; ++iter) {
    ASSERT_EQ("v2", Get(0, "foo"));
    ASSERT_EQ("v2", Get(0, "bar"));
    ASSERT_EQ("v3", Get(1, "mirko"));
    ASSERT_EQ("v5", Get(2, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(0, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(2, "foo"));
    if (iter <= 1) {
      Reopen();
    }
  }
  Close();
}

TEST_F(ColumnFamilyTest, IgnoreRecoveredLog) {
  std::string backup_logs = dbname_ + "/backup_logs";

  // delete old files in backup_logs directory
  ASSERT_OK(env_->CreateDirIfMissing(dbname_));
  ASSERT_OK(env_->CreateDirIfMissing(backup_logs));
  std::vector<std::string> old_files;
  env_->GetChildren(backup_logs, &old_files);
  for (auto& file : old_files) {
    if (file != "." && file != "..") {
      env_->DeleteFile(backup_logs + "/" + file);
    }
  }

  column_family_options_.merge_operator =
      MergeOperators::CreateUInt64AddOperator();
  db_options_.wal_dir = dbname_ + "/logs";
  Destroy();
  Open();
  CreateColumnFamilies({"cf1", "cf2"});

  // fill up the DB
  std::string one, two, three;
  PutFixed64(&one, 1);
  PutFixed64(&two, 2);
  PutFixed64(&three, 3);
  ASSERT_OK(Merge(0, "foo", one));
  ASSERT_OK(Merge(1, "mirko", one));
  ASSERT_OK(Merge(0, "foo", one));
  ASSERT_OK(Merge(2, "bla", one));
  ASSERT_OK(Merge(2, "fodor", one));
  ASSERT_OK(Merge(0, "bar", one));
  ASSERT_OK(Merge(2, "bla", one));
  ASSERT_OK(Merge(1, "mirko", two));
  ASSERT_OK(Merge(1, "franjo", one));

  // copy the logs to backup
  std::vector<std::string> logs;
  env_->GetChildren(db_options_.wal_dir, &logs);
  for (auto& log : logs) {
    if (log != ".." && log != ".") {
      CopyFile(db_options_.wal_dir + "/" + log, backup_logs + "/" + log);
    }
  }

  // recover the DB
  Close();

  // 1. check consistency
  // 2. copy the logs from backup back to the WAL dir. If recovery ran again
  //    on the same log files, it would apply the merge operator twice and
  //    produce incorrect results
  // 3. check consistency
  for (int iter = 0; iter < 2; ++iter) {
    // assert consistency
    Open({"default", "cf1", "cf2"});
    ASSERT_EQ(two, Get(0, "foo"));
    ASSERT_EQ(one, Get(0, "bar"));
    ASSERT_EQ(three, Get(1, "mirko"));
    ASSERT_EQ(one, Get(1, "franjo"));
    ASSERT_EQ(one, Get(2, "fodor"));
    ASSERT_EQ(two, Get(2, "bla"));
    Close();

    if (iter == 0) {
      // copy the logs from backup back to wal dir
      for (auto& log : logs) {
        if (log != ".." && log != ".") {
          CopyFile(backup_logs + "/" + log, db_options_.wal_dir + "/" + log);
        }
      }
    }
  }
}
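
// Flushes each column family (including while iterators pin an old
// superversion) and verifies that the flushed data survives reopens.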
TEST_F(ColumnFamilyTest, FlushTest) {
  Open();
  CreateColumnFamiliesAndReopen({"one", "two"});
  ASSERT_OK(Put(0, "foo", "v1"));
  ASSERT_OK(Put(0, "bar", "v2"));
  ASSERT_OK(Put(1, "mirko", "v3"));
  ASSERT_OK(Put(0, "foo", "v2"));
  ASSERT_OK(Put(2, "fodor", "v5"));

  for (int j = 0; j < 2; j++) {
    ReadOptions ro;
    std::vector<Iterator*> iterators;
    // Hold super version.
    if (j == 0) {
      ASSERT_OK(db_->NewIterators(ro, handles_, &iterators));
    }

    for (int i = 0; i < 3; ++i) {
      uint64_t max_total_in_memory_state =
          dbfull()->TEST_MaxTotalInMemoryState();
      Flush(i);
      ASSERT_EQ(dbfull()->TEST_MaxTotalInMemoryState(),
                max_total_in_memory_state);
    }
    ASSERT_OK(Put(1, "foofoo", "bar"));
    ASSERT_OK(Put(0, "foofoo", "bar"));

    for (auto* it : iterators) {
      delete it;
    }
  }
  Reopen();

  for (int iter = 0; iter <= 2; ++iter) {
    ASSERT_EQ("v2", Get(0, "foo"));
    ASSERT_EQ("v2", Get(0, "bar"));
    ASSERT_EQ("v3", Get(1, "mirko"));
    ASSERT_EQ("v5", Get(2, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(0, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
    ASSERT_EQ("NOT_FOUND", Get(2, "foo"));
    if (iter <= 1) {
      Reopen();
    }
  }
  Close();
}

// Makes sure that obsolete log files get deleted
TEST_F(ColumnFamilyTest, LogDeletionTest) {
  db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max();
  column_family_options_.write_buffer_size = 100000;  // 100KB
  Open();
  CreateColumnFamilies({"one", "two", "three", "four"});
  // Each bracket is one log file. If a number is in parentheses, the data
  // from that column family is no longer needed (it has been flushed).
  // []
  ASSERT_EQ(CountLiveLogFiles(), 0);
  PutRandomData(0, 1, 100);
  // [0]
  PutRandomData(1, 1, 100);
  // [0, 1]
  PutRandomData(1, 1000, 100);
  WaitForFlush(1);
  // [0, (1)] [1]
  ASSERT_EQ(CountLiveLogFiles(), 2);
  PutRandomData(0, 1, 100);
  // [0, (1)] [0, 1]
  ASSERT_EQ(CountLiveLogFiles(), 2);
  PutRandomData(2, 1, 100);
  // [0, (1)] [0, 1, 2]
  PutRandomData(2, 1000, 100);
  WaitForFlush(2);
  // [0, (1)] [0, 1, (2)] [2]
  ASSERT_EQ(CountLiveLogFiles(), 3);
  PutRandomData(2, 1000, 100);
  WaitForFlush(2);
  // [0, (1)] [0, 1, (2)] [(2)] [2]
  ASSERT_EQ(CountLiveLogFiles(), 4);
  PutRandomData(3, 1, 100);
  // [0, (1)] [0, 1, (2)] [(2)] [2, 3]
  PutRandomData(1, 1, 100);
  // [0, (1)] [0, 1, (2)] [(2)] [1, 2, 3]
  ASSERT_EQ(CountLiveLogFiles(), 4);
  PutRandomData(1, 1000, 100);
  WaitForFlush(1);
  // [0, (1)] [0, (1), (2)] [(2)] [(1), 2, 3] [1]
  ASSERT_EQ(CountLiveLogFiles(), 5);
  PutRandomData(0, 1000, 100);
  WaitForFlush(0);
  // [(0), (1)] [(0), (1), (2)] [(2)] [(1), 2, 3] [1, (0)] [0]
  // delete obsolete logs -->
  // [(1), 2, 3] [1, (0)] [0]
  ASSERT_EQ(CountLiveLogFiles(), 3);
  PutRandomData(0, 1000, 100);
  WaitForFlush(0);
  // [(1), 2, 3] [1, (0)], [(0)] [0]
  ASSERT_EQ(CountLiveLogFiles(), 4);
  PutRandomData(1, 1000, 100);
  WaitForFlush(1);
  // [(1), 2, 3] [(1), (0)] [(0)] [0, (1)] [1]
  ASSERT_EQ(CountLiveLogFiles(), 5);
  PutRandomData(2, 1000, 100);
  WaitForFlush(2);
  // [(1), (2), 3] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2]
  ASSERT_EQ(CountLiveLogFiles(), 6);
  PutRandomData(3, 1000, 100);
  WaitForFlush(3);
  // [(1), (2), (3)] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2, (3)] [3]
  // delete obsolete logs -->
  // [0, (1)] [1, (2)], [2, (3)] [3]
  ASSERT_EQ(CountLiveLogFiles(), 4);
  Close();
}

// Makes sure that each column family is flushed according to its own
// write buffer size and min_write_buffer_number_to_merge settings.
TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
  // disable flushing stale column families
  db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max();
  Open();
  CreateColumnFamilies({"one", "two", "three"});
  ColumnFamilyOptions default_cf, one, two, three;
  // setup options. all column families have max_write_buffer_number set to 10
  // "default" -> 100KB memtable, start flushing immediately
  // "one" -> 200KB memtable, start flushing with two immutable memtables
  // "two" -> 1MB memtable, start flushing with three immutable memtables
  // "three" -> 90KB memtable, start flushing with four immutable memtables
  default_cf.write_buffer_size = 100000;
  default_cf.max_write_buffer_number = 10;
  default_cf.min_write_buffer_number_to_merge = 1;
  default_cf.max_write_buffer_number_to_maintain = 0;
  one.write_buffer_size = 200000;
  one.max_write_buffer_number = 10;
  one.min_write_buffer_number_to_merge = 2;
  one.max_write_buffer_number_to_maintain = 1;
  two.write_buffer_size = 1000000;
  two.max_write_buffer_number = 10;
  two.min_write_buffer_number_to_merge = 3;
  two.max_write_buffer_number_to_maintain = 2;
  three.write_buffer_size = 90000;
  three.max_write_buffer_number = 10;
  three.min_write_buffer_number_to_merge = 4;
  three.max_write_buffer_number_to_maintain = -1;

  Reopen({default_cf, one, two, three});

  int micros_wait_for_flush = 10000;
  PutRandomData(0, 100, 1000);
  WaitForFlush(0);
  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 1);
  PutRandomData(1, 200, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 2);
  PutRandomData(2, 1000, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 1, 0});
  ASSERT_EQ(CountLiveLogFiles(), 3);
  PutRandomData(2, 1000, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 2, 0});
  ASSERT_EQ(CountLiveLogFiles(), 4);
  PutRandomData(3, 90, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 2, 1});
  ASSERT_EQ(CountLiveLogFiles(), 5);
  PutRandomData(3, 90, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 2, 2});
  ASSERT_EQ(CountLiveLogFiles(), 6);
  PutRandomData(3, 90, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 2, 3});
  ASSERT_EQ(CountLiveLogFiles(), 7);
  PutRandomData(0, 100, 1000);
  WaitForFlush(0);
  AssertNumberOfImmutableMemtables({0, 1, 2, 3});
  ASSERT_EQ(CountLiveLogFiles(), 8);
  PutRandomData(2, 100, 10000);
  WaitForFlush(2);
  AssertNumberOfImmutableMemtables({0, 1, 0, 3});
  ASSERT_EQ(CountLiveLogFiles(), 9);
  PutRandomData(3, 90, 1000);
  WaitForFlush(3);
  AssertNumberOfImmutableMemtables({0, 1, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 10);
  PutRandomData(3, 90, 1000);
  env_->SleepForMicroseconds(micros_wait_for_flush);
  AssertNumberOfImmutableMemtables({0, 1, 0, 1});
  ASSERT_EQ(CountLiveLogFiles(), 11);
  PutRandomData(1, 200, 1000);
  WaitForFlush(1);
  AssertNumberOfImmutableMemtables({0, 0, 0, 1});
  ASSERT_EQ(CountLiveLogFiles(), 5);
  PutRandomData(3, 240, 1000);
  WaitForFlush(3);
  PutRandomData(3, 300, 1000);
  WaitForFlush(3);
  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 12);
  PutRandomData(0, 100, 1000);
  WaitForFlush(0);
  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 12);
  PutRandomData(2, 3 * 100, 10000);
  WaitForFlush(2);
  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 12);
  PutRandomData(1, 2 * 200, 1000);
  WaitForFlush(1);
  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
  ASSERT_EQ(CountLiveLogFiles(), 7);
  Close();
}
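
// GetSnapshot() must return nullptr once any column family uses a memtable
// implementation that doesn't support snapshots.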
TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) {
  Open();
  auto* s1 = dbfull()->GetSnapshot();
  ASSERT_TRUE(s1 != nullptr);
  dbfull()->ReleaseSnapshot(s1);

  // Add a column family that doesn't support snapshot
  ColumnFamilyOptions first;
  first.memtable_factory.reset(NewHashCuckooRepFactory(1024 * 1024));
  CreateColumnFamilies({"first"}, {first});
  auto* s2 = dbfull()->GetSnapshot();
  ASSERT_TRUE(s2 == nullptr);

  // Add a column family that supports snapshot. Snapshot stays not supported.
  ColumnFamilyOptions second;
  CreateColumnFamilies({"second"}, {second});
  auto* s3 = dbfull()->GetSnapshot();
  ASSERT_TRUE(s3 == nullptr);
  Close();
}
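
// Each column family may have its own merge operator; Merge() on a family
// without one must return NotSupported.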
TEST_F(ColumnFamilyTest, DifferentMergeOperators) {
  Open();
  CreateColumnFamilies({"first", "second"});
  ColumnFamilyOptions default_cf, first, second;
  first.merge_operator = MergeOperators::CreateUInt64AddOperator();
  second.merge_operator = MergeOperators::CreateStringAppendOperator();
  Reopen({default_cf, first, second});

  std::string one, two, three;
  PutFixed64(&one, 1);
  PutFixed64(&two, 2);
  PutFixed64(&three, 3);

  ASSERT_OK(Put(0, "foo", two));
  ASSERT_OK(Put(0, "foo", one));
  ASSERT_TRUE(Merge(0, "foo", two).IsNotSupported());
  ASSERT_EQ(Get(0, "foo"), one);

  ASSERT_OK(Put(1, "foo", two));
  ASSERT_OK(Put(1, "foo", one));
  ASSERT_OK(Merge(1, "foo", two));
  ASSERT_EQ(Get(1, "foo"), three);

  ASSERT_OK(Put(2, "foo", two));
  ASSERT_OK(Put(2, "foo", one));
  ASSERT_OK(Merge(2, "foo", two));
  ASSERT_EQ(Get(2, "foo"), one + "," + two);
  Close();
}

TEST_F(ColumnFamilyTest, DifferentCompactionStyles) {
  Open();
  CreateColumnFamilies({"one", "two"});
  ColumnFamilyOptions default_cf, one, two;
  db_options_.max_open_files = 20;  // only 10 files in file cache
  db_options_.disableDataSync = true;

  default_cf.compaction_style = kCompactionStyleLevel;
  default_cf.num_levels = 3;
  default_cf.write_buffer_size = 64 << 10;  // 64KB
  default_cf.target_file_size_base = 30 << 10;
  default_cf.source_compaction_factor = 100;
  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;
  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));

  one.compaction_style = kCompactionStyleUniversal;
  one.num_levels = 1;
  // trigger compaction if there are >= 4 files
  one.level0_file_num_compaction_trigger = 4;
  one.write_buffer_size = 100000;

  two.compaction_style = kCompactionStyleLevel;
  two.num_levels = 4;
  two.max_mem_compaction_level = 0;
  two.level0_file_num_compaction_trigger = 3;
  two.write_buffer_size = 100000;

  Reopen({default_cf, one, two});

  // SETUP column family "one" -- universal style
  for (int i = 0; i < one.level0_file_num_compaction_trigger - 1; ++i) {
    PutRandomData(1, 11, 10000);
    WaitForFlush(1);
    ASSERT_EQ(ToString(i + 1), FilesPerLevel(1));
  }

  // SETUP column family "two" -- level style with 4 levels
  for (int i = 0; i < two.level0_file_num_compaction_trigger - 1; ++i) {
    PutRandomData(2, 15, 10000);
    WaitForFlush(2);
    ASSERT_EQ(ToString(i + 1), FilesPerLevel(2));
  }

  // TRIGGER compaction "one"
  PutRandomData(1, 12, 10000);

  // TRIGGER compaction "two"
  PutRandomData(2, 10, 10000);

  // WAIT for compactions
  WaitForCompaction();

  // VERIFY compaction "one"
  ASSERT_EQ("1", FilesPerLevel(1));

  // VERIFY compaction "two"
  ASSERT_EQ("0,1", FilesPerLevel(2));
  CompactAll(2);
  ASSERT_EQ("0,1", FilesPerLevel(2));

  Close();
}

namespace {
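// Renders an iterator's current entry as "key->value", or "(invalid)" if
// the iterator is exhausted.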
std::string IterStatus(Iterator* iter) {
  std::string result;
  if (iter->Valid()) {
    result = iter->key().ToString() + "->" + iter->value().ToString();
  } else {
    result = "(invalid)";
  }
  return result;
}
}  // anonymous namespace

TEST_F(ColumnFamilyTest, NewIteratorsTest) {
  // iter == 0 -- no tailing
  // iter == 1 -- tailing
  for (int iter = 0; iter < 2; ++iter) {
    Open();
    CreateColumnFamiliesAndReopen({"one", "two"});
    ASSERT_OK(Put(0, "a", "b"));
    ASSERT_OK(Put(1, "b", "a"));
    ASSERT_OK(Put(2, "c", "m"));
    ASSERT_OK(Put(2, "v", "t"));
    std::vector<Iterator*> iterators;
    ReadOptions options;
    options.tailing = (iter == 1);
    ASSERT_OK(db_->NewIterators(options, handles_, &iterators));

    for (auto it : iterators) {
      it->SeekToFirst();
    }
    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
    ASSERT_EQ(IterStatus(iterators[1]), "b->a");
    ASSERT_EQ(IterStatus(iterators[2]), "c->m");

    ASSERT_OK(Put(1, "x", "x"));

    for (auto it : iterators) {
      it->Next();
    }

    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
    if (iter == 0) {
      // no tailing
      ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
    } else {
      // tailing
      ASSERT_EQ(IterStatus(iterators[1]), "x->x");
    }
    ASSERT_EQ(IterStatus(iterators[2]), "v->t");

    for (auto it : iterators) {
      delete it;
    }
    Destroy();
  }
}

TEST_F(ColumnFamilyTest, ReadOnlyDBTest) {
  Open();
  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
  ASSERT_OK(Put(0, "a", "b"));
  ASSERT_OK(Put(1, "foo", "bla"));
  ASSERT_OK(Put(2, "foo", "blabla"));
  ASSERT_OK(Put(3, "foo", "blablabla"));
  ASSERT_OK(Put(4, "foo", "blablablabla"));

  DropColumnFamilies({2});
  Close();
  // open only a subset of column families
  AssertOpenReadOnly({"default", "one", "four"});
  ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
  ASSERT_EQ("bla", Get(1, "foo"));
  ASSERT_EQ("blablablabla", Get(2, "foo"));

  // test NewIterators()
  {
    std::vector<Iterator*> iterators;
    ASSERT_OK(db_->NewIterators(ReadOptions(), handles_, &iterators));
    for (auto it : iterators) {
      it->SeekToFirst();
    }
    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
    ASSERT_EQ(IterStatus(iterators[1]), "foo->bla");
    ASSERT_EQ(IterStatus(iterators[2]), "foo->blablablabla");
    for (auto it : iterators) {
      it->Next();
    }
    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
    ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
    ASSERT_EQ(IterStatus(iterators[2]), "(invalid)");

    for (auto it : iterators) {
      delete it;
    }
  }

  Close();
  // can't open dropped column family
  Status s = OpenReadOnly({"default", "one", "two"});
  ASSERT_TRUE(!s.ok());

  // Can't open without specifying default column family
  s = OpenReadOnly({"one", "four"});
  ASSERT_TRUE(!s.ok());
}
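
// Flushing all column families should create one SST per family plus a
// single new WAL; a log that is already empty should not be rolled again.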
TEST_F(ColumnFamilyTest, DontRollEmptyLogs) {
  Open();
  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});

  for (size_t i = 0; i < handles_.size(); ++i) {
    PutRandomData(static_cast<int>(i), 10, 100);
  }
  int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
  // this will trigger the flushes
  for (int i = 0; i <= 4; ++i) {
    ASSERT_OK(Flush(i));
  }

  for (int i = 0; i < 4; ++i) {
    dbfull()->TEST_WaitForFlushMemTable(handles_[i]);
  }
  int total_new_writable_files =
      env_->GetNumberOfNewWritableFileCalls() - num_writable_file_start;
  ASSERT_EQ(static_cast<size_t>(total_new_writable_files), handles_.size() + 1);
  Close();
}

TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) {
  Open();
  CreateColumnFamilies({"one", "two"});
  ColumnFamilyOptions default_cf, one, two;
  default_cf.write_buffer_size = 100000;  // small write buffer size
  default_cf.disable_auto_compactions = true;
  one.disable_auto_compactions = true;
  two.disable_auto_compactions = true;
  db_options_.max_total_wal_size = 210000;

  Reopen({default_cf, one, two});

  PutRandomData(2, 1, 10);  // 10 bytes
  for (int i = 0; i < 2; ++i) {
    PutRandomData(0, 100, 1000);  // flush
    WaitForFlush(0);
    ASSERT_EQ(i + 1, CountLiveFiles());
  }
  // third flush. now, CF [two] should be detected as stale and flushed
  // column family 1 should not be flushed since it's empty
  PutRandomData(0, 100, 1000);  // flush
  WaitForFlush(0);
  WaitForFlush(2);
  // 3 files for the default column family, 1 file for column family [two],
  // zero files for column family [one], because it's empty
  ASSERT_EQ(4, CountLiveFiles());
  Close();
}
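
// Opening with a column family that doesn't exist yet must fail unless
// create_missing_column_families is set.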
TEST_F(ColumnFamilyTest, CreateMissingColumnFamilies) {
  Status s = TryOpen({"one", "two"});
  ASSERT_TRUE(!s.ok());
  db_options_.create_missing_column_families = true;
  s = TryOpen({"default", "one", "two"});
  ASSERT_TRUE(s.ok());
  Close();
}
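
// SanitizeOptions() must keep level0_stop_writes_trigger >=
// level0_slowdown_writes_trigger >= level0_file_num_compaction_trigger and
// fix up num_levels for each compaction style.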
TEST_F(ColumnFamilyTest, SanitizeOptions) {
  DBOptions db_options;
  for (int s = kCompactionStyleLevel; s <= kCompactionStyleUniversal; ++s) {
    for (int l = 0; l <= 2; l++) {
      for (int i = 1; i <= 3; i++) {
        for (int j = 1; j <= 3; j++) {
          for (int k = 1; k <= 3; k++) {
            ColumnFamilyOptions original;
            original.compaction_style = static_cast<CompactionStyle>(s);
            original.num_levels = l;
            original.level0_stop_writes_trigger = i;
            original.level0_slowdown_writes_trigger = j;
            original.level0_file_num_compaction_trigger = k;
            ColumnFamilyOptions result =
                SanitizeOptions(db_options, nullptr, original);
            ASSERT_TRUE(result.level0_stop_writes_trigger >=
                        result.level0_slowdown_writes_trigger);
            ASSERT_TRUE(result.level0_slowdown_writes_trigger >=
                        result.level0_file_num_compaction_trigger);
            ASSERT_TRUE(result.level0_file_num_compaction_trigger ==
                        original.level0_file_num_compaction_trigger);
            if (s == kCompactionStyleLevel) {
              ASSERT_GE(result.num_levels, 2);
            } else {
              ASSERT_GE(result.num_levels, 1);
              if (original.num_levels >= 1) {
                ASSERT_EQ(result.num_levels, original.num_levels);
              }
            }
          }
        }
      }
    }
  }
}

TEST_F(ColumnFamilyTest, ReadDroppedColumnFamily) {
  // iter 0 -- drop CF, don't reopen
  // iter 1 -- delete CF, reopen
  for (int iter = 0; iter < 2; ++iter) {
    db_options_.create_missing_column_families = true;
    db_options_.max_open_files = 20;
    // delete obsolete files always
    db_options_.delete_obsolete_files_period_micros = 0;
    Open({"default", "one", "two"});
    ColumnFamilyOptions options;
    options.level0_file_num_compaction_trigger = 100;
    options.level0_slowdown_writes_trigger = 200;
    options.level0_stop_writes_trigger = 200;
    options.write_buffer_size = 100000;  // small write buffer size
    Reopen({options, options, options});

    // 1MB should create ~10 files for each CF
    int kKeysNum = 10000;
    PutRandomData(0, kKeysNum, 100);
    PutRandomData(1, kKeysNum, 100);
    PutRandomData(2, kKeysNum, 100);

    if (iter == 0) {
      // Drop CF two
      ASSERT_OK(db_->DropColumnFamily(handles_[2]));
    } else {
      // delete CF two
      delete handles_[2];
      handles_[2] = nullptr;
    }

    // Add a bunch more data to the other CFs
    PutRandomData(0, kKeysNum, 100);
    PutRandomData(1, kKeysNum, 100);

    if (iter == 1) {
      Reopen();
    }

    // Since we didn't delete the CF handle, RocksDB's contract guarantees
    // that we're still able to read the dropped CF
    for (int i = 0; i < 3; ++i) {
      std::unique_ptr<Iterator> iterator(
          db_->NewIterator(ReadOptions(), handles_[i]));
      int count = 0;
      for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
        ASSERT_OK(iterator->status());
        ++count;
      }
      ASSERT_EQ(count, kKeysNum * ((i == 2) ? 1 : 2));
    }

    Close();
    Destroy();
  }
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}