2013-10-16 23:59:46 +02:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 23:37:00 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
2014-02-05 01:21:47 +01:00
|
|
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
#include <stdio.h>
|
2014-03-01 03:19:07 +01:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
#include <algorithm>
|
2014-06-13 04:03:22 +02:00
|
|
|
#include <iostream>
|
2011-03-18 23:37:00 +01:00
|
|
|
#include <map>
|
2011-07-20 01:36:47 +02:00
|
|
|
#include <string>
|
2013-07-23 23:42:27 +02:00
|
|
|
#include <memory>
|
2013-10-10 20:43:24 +02:00
|
|
|
#include <vector>
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "db/dbformat.h"
|
|
|
|
#include "db/memtable.h"
|
|
|
|
#include "db/write_batch_internal.h"
|
2014-12-02 21:09:20 +01:00
|
|
|
#include "db/writebuffer.h"
|
2013-12-06 01:51:26 +01:00
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "rocksdb/cache.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/iterator.h"
|
|
|
|
#include "rocksdb/memtablerep.h"
|
2014-03-01 03:19:07 +01:00
|
|
|
#include "rocksdb/slice_transform.h"
|
|
|
|
#include "rocksdb/statistics.h"
|
|
|
|
|
2014-01-28 06:58:46 +01:00
|
|
|
#include "table/block.h"
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "table/block_based_table_builder.h"
|
2013-11-20 07:00:48 +01:00
|
|
|
#include "table/block_based_table_factory.h"
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "table/block_based_table_reader.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/block_builder.h"
|
|
|
|
#include "table/format.h"
|
2014-01-28 06:58:46 +01:00
|
|
|
#include "table/meta_blocks.h"
|
|
|
|
#include "table/plain_table_factory.h"
|
2014-09-29 20:09:09 +02:00
|
|
|
#include "table/get_context.h"
|
2013-12-06 01:51:26 +01:00
|
|
|
|
2015-01-09 22:04:06 +01:00
|
|
|
#include "util/compression.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/random.h"
|
2014-03-01 03:19:07 +01:00
|
|
|
#include "util/statistics.h"
|
2015-03-20 01:29:37 +01:00
|
|
|
#include "util/string_util.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/testharness.h"
|
|
|
|
#include "util/testutil.h"
|
2014-09-05 02:40:41 +02:00
|
|
|
#include "util/scoped_arena_iterator.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2015-03-03 02:07:03 +01:00
|
|
|
using std::vector;
|
|
|
|
using std::string;
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-05-01 20:09:32 +02:00
|
|
|
extern const uint64_t kLegacyBlockBasedTableMagicNumber;
|
|
|
|
extern const uint64_t kLegacyPlainTableMagicNumber;
|
|
|
|
extern const uint64_t kBlockBasedTableMagicNumber;
|
|
|
|
extern const uint64_t kPlainTableMagicNumber;
|
|
|
|
|
2013-11-08 06:27:21 +01:00
|
|
|
namespace {
|
2014-01-24 20:09:04 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Return reverse of "key".
|
|
|
|
// Used to test non-lexicographic comparators.
|
2014-01-24 20:09:04 +01:00
|
|
|
std::string Reverse(const Slice& key) {
|
|
|
|
auto rev = key.ToString();
|
|
|
|
std::reverse(rev.begin(), rev.end());
|
2011-03-18 23:37:00 +01:00
|
|
|
return rev;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Comparator that orders keys by the bytewise order of their reversal.
// Every operation reverses its inputs, delegates to BytewiseComparator(),
// and reverses the results back.
class ReverseKeyComparator : public Comparator {
 public:
  virtual const char* Name() const override {
    return "rocksdb.ReverseBytewiseComparator";
  }

  virtual int Compare(const Slice& a, const Slice& b) const override {
    // Compare the reversed keys bytewise.
    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
  }

  virtual void FindShortestSeparator(std::string* start,
                                     const Slice& limit) const override {
    // Map into reversed space, shorten there, then map back.
    std::string s = Reverse(*start);
    std::string l = Reverse(limit);
    BytewiseComparator()->FindShortestSeparator(&s, l);
    *start = Reverse(s);
  }

  virtual void FindShortSuccessor(std::string* key) const override {
    // Same round-trip through reversed space as above.
    std::string s = Reverse(*key);
    BytewiseComparator()->FindShortSuccessor(&s);
    *key = Reverse(s);
  }
};
|
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
ReverseKeyComparator reverse_key_comparator;
|
|
|
|
|
|
|
|
void Increment(const Comparator* cmp, std::string* key) {
|
2011-03-18 23:37:00 +01:00
|
|
|
if (cmp == BytewiseComparator()) {
|
|
|
|
key->push_back('\0');
|
|
|
|
} else {
|
|
|
|
assert(cmp == &reverse_key_comparator);
|
|
|
|
std::string rev = Reverse(*key);
|
|
|
|
rev.push_back('\0');
|
|
|
|
*key = Reverse(rev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// An STL comparator that uses a Comparator
|
|
|
|
struct STLLessThan {
  // Not owned; defaults to the global bytewise comparator.
  const Comparator* cmp;

  STLLessThan() : cmp(BytewiseComparator()) { }
  explicit STLLessThan(const Comparator* c) : cmp(c) { }
  // Strict-weak-ordering adapter so std::map can use a rocksdb Comparator.
  bool operator()(const std::string& a, const std::string& b) const {
    return cmp->Compare(Slice(a), Slice(b)) < 0;
  }
};
|
2014-01-24 20:09:04 +01:00
|
|
|
|
2011-10-31 18:22:06 +01:00
|
|
|
} // namespace
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
// Ground-truth container for test data: key/value pairs ordered by the
// user-supplied Comparator (adapted through STLLessThan).
typedef std::map<std::string, std::string, STLLessThan> KVMap;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Helper class for tests to unify the interface between
|
|
|
|
// BlockBuilder/TableBuilder and Block/Table.
|
|
|
|
class Constructor {
|
|
|
|
public:
|
2014-01-24 20:09:04 +01:00
|
|
|
explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
|
2011-03-18 23:37:00 +01:00
|
|
|
virtual ~Constructor() { }
|
|
|
|
|
|
|
|
void Add(const std::string& key, const Slice& value) {
|
|
|
|
data_[key] = value.ToString();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finish constructing the data structure with all the keys that have
|
|
|
|
// been added so far. Returns the keys in sorted order in "*keys"
|
|
|
|
// and stores the key/value pairs in "*kvmap"
|
|
|
|
void Finish(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
|
|
|
std::vector<std::string>* keys, KVMap* kvmap) {
|
|
|
|
last_internal_key_ = &internal_comparator;
|
2011-03-18 23:37:00 +01:00
|
|
|
*kvmap = data_;
|
|
|
|
keys->clear();
|
|
|
|
for (KVMap::const_iterator it = data_.begin();
|
|
|
|
it != data_.end();
|
|
|
|
++it) {
|
|
|
|
keys->push_back(it->first);
|
|
|
|
}
|
|
|
|
data_.clear();
|
2014-09-05 01:18:36 +02:00
|
|
|
Status s = FinishImpl(options, ioptions, table_options,
|
|
|
|
internal_comparator, *kvmap);
|
2011-03-18 23:37:00 +01:00
|
|
|
ASSERT_TRUE(s.ok()) << s.ToString();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Construct the data structure from the data in "data"
|
2014-01-27 22:53:22 +01:00
|
|
|
virtual Status FinishImpl(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
|
|
|
const KVMap& data) = 0;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
virtual Iterator* NewIterator() const = 0;
|
|
|
|
|
|
|
|
virtual const KVMap& data() { return data_; }
|
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool IsArenaMode() const { return false; }
|
|
|
|
|
2013-03-01 03:04:58 +01:00
|
|
|
virtual DB* db() const { return nullptr; } // Overridden in DBConstructor
|
2011-03-21 20:40:57 +01:00
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool AnywayDeleteIterator() const { return false; }
|
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
protected:
|
|
|
|
const InternalKeyComparator* last_internal_key_;
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
|
|
|
KVMap data_;
|
|
|
|
};
|
|
|
|
|
|
|
|
class BlockConstructor: public Constructor {
|
|
|
|
public:
|
|
|
|
explicit BlockConstructor(const Comparator* cmp)
|
|
|
|
: Constructor(cmp),
|
|
|
|
comparator_(cmp),
|
2013-03-01 03:04:58 +01:00
|
|
|
block_(nullptr) { }
|
2011-03-18 23:37:00 +01:00
|
|
|
~BlockConstructor() {
|
|
|
|
delete block_;
|
|
|
|
}
|
2014-01-27 22:53:22 +01:00
|
|
|
virtual Status FinishImpl(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2015-02-26 20:28:41 +01:00
|
|
|
const KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
delete block_;
|
2013-03-01 03:04:58 +01:00
|
|
|
block_ = nullptr;
|
2014-09-02 20:49:38 +02:00
|
|
|
BlockBuilder builder(table_options.block_restart_interval);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
|
|
|
builder.Add(kv.first, kv.second);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
// Open the block
|
2012-04-17 17:36:46 +02:00
|
|
|
data_ = builder.Finish().ToString();
|
|
|
|
BlockContents contents;
|
|
|
|
contents.data = data_;
|
|
|
|
contents.cachable = false;
|
2014-08-16 00:05:09 +02:00
|
|
|
block_ = new Block(std::move(contents));
|
2011-03-18 23:37:00 +01:00
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Iterator* NewIterator() const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
return block_->NewIterator(comparator_);
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const Comparator* comparator_;
|
2012-04-17 17:36:46 +02:00
|
|
|
std::string data_;
|
2011-03-18 23:37:00 +01:00
|
|
|
Block* block_;
|
|
|
|
|
|
|
|
BlockConstructor();
|
|
|
|
};
|
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
// A helper class that converts internal format keys into user keys
|
|
|
|
class KeyConvertingIterator: public Iterator {
 public:
  // Takes ownership of "iter". If arena_mode is true the wrapped iterator
  // was placement-allocated, so it is destroyed in place instead of deleted.
  KeyConvertingIterator(Iterator* iter, bool arena_mode = false)
      : iter_(iter), arena_mode_(arena_mode) {}
  virtual ~KeyConvertingIterator() {
    if (arena_mode_) {
      // Arena-backed: run the destructor only; memory belongs to the arena.
      iter_->~Iterator();
    } else {
      delete iter_;
    }
  }
  virtual bool Valid() const override { return iter_->Valid(); }
  virtual void Seek(const Slice& target) override {
    // Wrap the user key as an internal key so the inner iterator can seek.
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  virtual void SeekToFirst() override { iter_->SeekToFirst(); }
  virtual void SeekToLast() override { iter_->SeekToLast(); }
  virtual void Next() override { iter_->Next(); }
  virtual void Prev() override { iter_->Prev(); }

  virtual Slice key() const override {
    assert(Valid());
    ParsedInternalKey parsed_key;
    if (!ParseInternalKey(iter_->key(), &parsed_key)) {
      // Remember the failure (status_ is mutable since key() is const) and
      // return a sentinel rather than crashing the test.
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    // Expose only the user-key portion of the internal key.
    return parsed_key.user_key;
  }

  virtual Slice value() const override { return iter_->value(); }
  virtual Status status() const override {
    // A parse failure recorded in key() takes precedence over iter_ status.
    return status_.ok() ? iter_->status() : status_;
  }

 private:
  mutable Status status_;  // set by key() on malformed internal keys
  Iterator* iter_;         // owned; see destructor for cleanup rules
  bool arena_mode_;

  // No copying allowed
  KeyConvertingIterator(const KeyConvertingIterator&);
  void operator=(const KeyConvertingIterator&);
};
|
|
|
|
|
|
|
|
class TableConstructor: public Constructor {
|
|
|
|
public:
|
2014-01-28 19:35:48 +01:00
|
|
|
explicit TableConstructor(const Comparator* cmp,
|
2014-04-25 21:21:34 +02:00
|
|
|
bool convert_to_internal_key = false)
|
2014-02-08 01:25:38 +01:00
|
|
|
: Constructor(cmp),
|
2014-04-25 21:21:34 +02:00
|
|
|
convert_to_internal_key_(convert_to_internal_key) {}
|
2014-01-28 19:35:48 +01:00
|
|
|
~TableConstructor() { Reset(); }
|
2014-01-24 19:57:15 +01:00
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
virtual Status FinishImpl(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2015-02-26 20:28:41 +01:00
|
|
|
const KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
Reset();
|
2015-08-05 16:33:27 +02:00
|
|
|
file_writer_.reset(test::GetWritableFileWriter(new test::StringSink()));
|
2013-12-20 18:35:24 +01:00
|
|
|
unique_ptr<TableBuilder> builder;
|
A new call back to TablePropertiesCollector to allow users know the entry is add, delete or merge
Summary:
Currently users have no idea a key is add, delete or merge from TablePropertiesCollector call back. Add a new function to add it.
Also refactor the codes so that
(1) make table property collector and internal table property collector two separate data structures with the later one now exposed
(2) table builders only receive internal table properties
Test Plan: Add cases in table_properties_collector_test to cover both of old and new ways of using TablePropertiesCollector.
Reviewers: yhchiang, igor.sugak, rven, igor
Reviewed By: rven, igor
Subscribers: meyering, yoshinorim, maykov, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35373
2015-04-06 19:04:30 +02:00
|
|
|
std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
|
|
|
|
int_tbl_prop_collector_factories;
|
2014-09-05 01:18:36 +02:00
|
|
|
builder.reset(ioptions.table_factory->NewTableBuilder(
|
A new call back to TablePropertiesCollector to allow users know the entry is add, delete or merge
Summary:
Currently users have no idea a key is add, delete or merge from TablePropertiesCollector call back. Add a new function to add it.
Also refactor the codes so that
(1) make table property collector and internal table property collector two separate data structures with the later one now exposed
(2) table builders only receive internal table properties
Test Plan: Add cases in table_properties_collector_test to cover both of old and new ways of using TablePropertiesCollector.
Reviewers: yhchiang, igor.sugak, rven, igor
Reviewed By: rven, igor
Subscribers: meyering, yoshinorim, maykov, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35373
2015-04-06 19:04:30 +02:00
|
|
|
TableBuilderOptions(ioptions, internal_comparator,
|
|
|
|
&int_tbl_prop_collector_factories,
|
|
|
|
options.compression, CompressionOptions(), false),
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_.get()));
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
2013-12-20 18:35:24 +01:00
|
|
|
if (convert_to_internal_key_) {
|
2014-11-06 20:14:28 +01:00
|
|
|
ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
|
2013-12-20 18:35:24 +01:00
|
|
|
std::string encoded;
|
|
|
|
AppendInternalKey(&encoded, ikey);
|
2014-11-06 20:14:28 +01:00
|
|
|
builder->Add(encoded, kv.second);
|
2013-12-20 18:35:24 +01:00
|
|
|
} else {
|
2014-11-06 20:14:28 +01:00
|
|
|
builder->Add(kv.first, kv.second);
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(builder->status().ok());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
2013-12-20 18:35:24 +01:00
|
|
|
Status s = builder->Finish();
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_->Flush();
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(s.ok()) << s.ToString();
|
2011-03-18 23:37:00 +01:00
|
|
|
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
EXPECT_EQ(GetSink()->contents().size(), builder->FileSize());
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Open the table
|
2013-02-01 00:20:24 +01:00
|
|
|
uniq_id_ = cur_uniq_id_++;
|
2015-08-05 16:33:27 +02:00
|
|
|
file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
|
2014-09-05 01:18:36 +02:00
|
|
|
return ioptions.table_factory->NewTableReader(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
ioptions, soptions, internal_comparator, std::move(file_reader_),
|
|
|
|
GetSink()->contents().size(), &table_reader_);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Iterator* NewIterator() const override {
|
2014-02-08 01:25:38 +01:00
|
|
|
ReadOptions ro;
|
|
|
|
Iterator* iter = table_reader_->NewIterator(ro);
|
2013-12-20 18:35:24 +01:00
|
|
|
if (convert_to_internal_key_) {
|
|
|
|
return new KeyConvertingIterator(iter);
|
|
|
|
} else {
|
|
|
|
return iter;
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t ApproximateOffsetOf(const Slice& key) const {
|
2013-10-30 18:52:33 +01:00
|
|
|
return table_reader_->ApproximateOffsetOf(key);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
virtual Status Reopen(const ImmutableCFOptions& ioptions) {
|
2015-08-05 16:33:27 +02:00
|
|
|
file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
|
2014-09-05 01:18:36 +02:00
|
|
|
return ioptions.table_factory->NewTableReader(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
ioptions, soptions, *last_internal_key_, std::move(file_reader_),
|
|
|
|
GetSink()->contents().size(), &table_reader_);
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
|
|
|
|
2014-08-26 01:14:30 +02:00
|
|
|
virtual TableReader* GetTableReader() {
|
2013-10-30 18:52:33 +01:00
|
|
|
return table_reader_.get();
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool AnywayDeleteIterator() const override {
|
|
|
|
return convert_to_internal_key_;
|
|
|
|
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
|
|
|
void Reset() {
|
2013-02-01 00:20:24 +01:00
|
|
|
uniq_id_ = 0;
|
2013-10-30 18:52:33 +01:00
|
|
|
table_reader_.reset();
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_.reset();
|
|
|
|
file_reader_.reset();
|
|
|
|
}
|
|
|
|
|
2015-08-05 16:33:27 +02:00
|
|
|
test::StringSink* GetSink() {
|
|
|
|
return static_cast<test::StringSink*>(file_writer_->writable_file());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2013-02-01 00:20:24 +01:00
|
|
|
uint64_t uniq_id_;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
unique_ptr<WritableFileWriter> file_writer_;
|
|
|
|
unique_ptr<RandomAccessFileReader> file_reader_;
|
2013-10-30 18:52:33 +01:00
|
|
|
unique_ptr<TableReader> table_reader_;
|
2014-09-05 02:40:41 +02:00
|
|
|
bool convert_to_internal_key_;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
TableConstructor();
|
2013-02-01 00:20:24 +01:00
|
|
|
|
|
|
|
static uint64_t cur_uniq_id_;
|
2013-06-08 00:35:17 +02:00
|
|
|
const EnvOptions soptions;
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
2013-12-20 18:35:24 +01:00
|
|
|
// Start ids at 1; TableConstructor::Reset() sets uniq_id_ back to 0.
uint64_t TableConstructor::cur_uniq_id_ = 1;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
class MemTableConstructor: public Constructor {
|
|
|
|
public:
|
2014-12-02 21:09:20 +01:00
|
|
|
explicit MemTableConstructor(const Comparator* cmp, WriteBuffer* wb)
|
2011-03-18 23:37:00 +01:00
|
|
|
: Constructor(cmp),
|
2013-07-23 23:42:27 +02:00
|
|
|
internal_comparator_(cmp),
|
2014-12-02 21:09:20 +01:00
|
|
|
write_buffer_(wb),
|
2013-07-23 23:42:27 +02:00
|
|
|
table_factory_(new SkipListFactory) {
|
2014-12-02 21:09:20 +01:00
|
|
|
options_.memtable_factory = table_factory_;
|
|
|
|
ImmutableCFOptions ioptions(options_);
|
2014-10-02 01:19:16 +02:00
|
|
|
memtable_ = new MemTable(internal_comparator_, ioptions,
|
2015-05-29 23:36:35 +02:00
|
|
|
MutableCFOptions(options_, ioptions), wb,
|
|
|
|
kMaxSequenceNumber);
|
2011-05-21 04:17:43 +02:00
|
|
|
memtable_->Ref();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
~MemTableConstructor() {
|
2013-12-02 06:23:44 +01:00
|
|
|
delete memtable_->Unref();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
2014-11-06 20:14:28 +01:00
|
|
|
virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2015-02-26 20:28:41 +01:00
|
|
|
const KVMap& kv_map) override {
|
2013-12-02 06:23:44 +01:00
|
|
|
delete memtable_->Unref();
|
2014-12-02 21:09:20 +01:00
|
|
|
ImmutableCFOptions mem_ioptions(ioptions);
|
2014-10-02 01:19:16 +02:00
|
|
|
memtable_ = new MemTable(internal_comparator_, mem_ioptions,
|
2014-12-02 21:09:20 +01:00
|
|
|
MutableCFOptions(options_, mem_ioptions),
|
2015-05-29 23:36:35 +02:00
|
|
|
write_buffer_, kMaxSequenceNumber);
|
2011-05-21 04:17:43 +02:00
|
|
|
memtable_->Ref();
|
2011-03-18 23:37:00 +01:00
|
|
|
int seq = 1;
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
|
|
|
memtable_->Add(seq, kTypeValue, kv.first, kv.second);
|
2011-03-18 23:37:00 +01:00
|
|
|
seq++;
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Iterator* NewIterator() const override {
|
2014-09-05 02:40:41 +02:00
|
|
|
return new KeyConvertingIterator(
|
|
|
|
memtable_->NewIterator(ReadOptions(), &arena_), true);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool AnywayDeleteIterator() const override { return true; }
|
|
|
|
|
|
|
|
virtual bool IsArenaMode() const override { return true; }
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
2014-09-05 02:40:41 +02:00
|
|
|
mutable Arena arena_;
|
2011-03-18 23:37:00 +01:00
|
|
|
InternalKeyComparator internal_comparator_;
|
2014-12-02 21:09:20 +01:00
|
|
|
Options options_;
|
|
|
|
WriteBuffer* write_buffer_;
|
2011-03-18 23:37:00 +01:00
|
|
|
MemTable* memtable_;
|
2013-07-23 23:42:27 +02:00
|
|
|
std::shared_ptr<SkipListFactory> table_factory_;
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
class DBConstructor: public Constructor {
|
|
|
|
public:
|
|
|
|
explicit DBConstructor(const Comparator* cmp)
|
|
|
|
: Constructor(cmp),
|
|
|
|
comparator_(cmp) {
|
2013-03-01 03:04:58 +01:00
|
|
|
db_ = nullptr;
|
2011-03-18 23:37:00 +01:00
|
|
|
NewDB();
|
|
|
|
}
|
|
|
|
~DBConstructor() {
|
|
|
|
delete db_;
|
|
|
|
}
|
2014-01-27 22:53:22 +01:00
|
|
|
virtual Status FinishImpl(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2014-08-25 23:22:05 +02:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2015-02-26 20:28:41 +01:00
|
|
|
const KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
delete db_;
|
2013-03-01 03:04:58 +01:00
|
|
|
db_ = nullptr;
|
2011-03-18 23:37:00 +01:00
|
|
|
NewDB();
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
2011-03-18 23:37:00 +01:00
|
|
|
WriteBatch batch;
|
2014-11-06 20:14:28 +01:00
|
|
|
batch.Put(kv.first, kv.second);
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Iterator* NewIterator() const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
return db_->NewIterator(ReadOptions());
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual DB* db() const override { return db_; }
|
2011-03-21 20:40:57 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
|
|
|
void NewDB() {
|
|
|
|
std::string name = test::TmpDir() + "/table_testdb";
|
|
|
|
|
2013-11-20 07:00:48 +01:00
|
|
|
Options options;
|
2011-03-18 23:37:00 +01:00
|
|
|
options.comparator = comparator_;
|
|
|
|
Status status = DestroyDB(name, options);
|
|
|
|
ASSERT_TRUE(status.ok()) << status.ToString();
|
|
|
|
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.error_if_exists = true;
|
2011-03-21 20:40:57 +01:00
|
|
|
options.write_buffer_size = 10000; // Something small to force merging
|
2011-03-18 23:37:00 +01:00
|
|
|
status = DB::Open(options, name, &db_);
|
|
|
|
ASSERT_TRUE(status.ok()) << status.ToString();
|
|
|
|
}
|
|
|
|
|
|
|
|
const Comparator* comparator_;
|
|
|
|
DB* db_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// The table/iterator implementations the harness runs every scenario
// against. Plain table variants are compiled out in ROCKSDB_LITE.
enum TestType {
  BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
  // Prefix-seek-only plain table with a fixed-or-shorter prefix extractor.
  PLAIN_TABLE_SEMI_FIXED_PREFIX,
  // Plain table where the whole key is the prefix (noop transform).
  PLAIN_TABLE_FULL_STR_PREFIX,
  // Plain table in total-order mode (no prefix extractor).
  PLAIN_TABLE_TOTAL_ORDER,
#endif  // !ROCKSDB_LITE
  BLOCK_TEST,
  MEMTABLE_TEST,
  DB_TEST
};
|
|
|
|
|
|
|
|
// One point in the test matrix produced by GenerateArgList().
struct TestArgs {
  TestType type;
  // Run with the reverse key comparator instead of the default one.
  bool reverse_compare;
  // Block restart interval (block-based formats only).
  int restart_interval;
  CompressionType compression;
  // Block-based table format version; see BlockBasedTableOptions.
  uint32_t format_version;
};
|
|
|
|
|
2013-11-20 07:00:48 +01:00
|
|
|
static std::vector<TestArgs> GenerateArgList() {
|
2014-01-24 20:09:04 +01:00
|
|
|
std::vector<TestArgs> test_args;
|
|
|
|
std::vector<TestType> test_types = {
|
2015-07-20 20:09:14 +02:00
|
|
|
BLOCK_BASED_TABLE_TEST,
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
PLAIN_TABLE_SEMI_FIXED_PREFIX,
|
|
|
|
PLAIN_TABLE_FULL_STR_PREFIX,
|
|
|
|
PLAIN_TABLE_TOTAL_ORDER,
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
BLOCK_TEST,
|
|
|
|
MEMTABLE_TEST, DB_TEST};
|
2014-01-24 20:09:04 +01:00
|
|
|
std::vector<bool> reverse_compare_types = {false, true};
|
|
|
|
std::vector<int> restart_intervals = {16, 1, 1024};
|
2012-06-28 08:41:33 +02:00
|
|
|
|
|
|
|
// Only add compression if it is supported
|
2015-01-15 01:24:24 +01:00
|
|
|
std::vector<std::pair<CompressionType, bool>> compression_types;
|
|
|
|
compression_types.emplace_back(kNoCompression, false);
|
2015-04-06 21:50:44 +02:00
|
|
|
if (Snappy_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kSnappyCompression, false);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (Zlib_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kZlibCompression, false);
|
|
|
|
compression_types.emplace_back(kZlibCompression, true);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (BZip2_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kBZip2Compression, false);
|
|
|
|
compression_types.emplace_back(kBZip2Compression, true);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (LZ4_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kLZ4Compression, false);
|
|
|
|
compression_types.emplace_back(kLZ4Compression, true);
|
|
|
|
compression_types.emplace_back(kLZ4HCCompression, false);
|
|
|
|
compression_types.emplace_back(kLZ4HCCompression, true);
|
2014-02-08 03:12:30 +01:00
|
|
|
}
|
2015-08-28 00:40:42 +02:00
|
|
|
if (ZSTD_Supported()) {
|
|
|
|
compression_types.emplace_back(kZSTDNotFinalCompression, false);
|
|
|
|
compression_types.emplace_back(kZSTDNotFinalCompression, true);
|
|
|
|
}
|
2012-06-29 04:26:43 +02:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
for (auto test_type : test_types) {
|
|
|
|
for (auto reverse_compare : reverse_compare_types) {
|
2015-07-20 20:09:14 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2014-01-24 20:09:04 +01:00
|
|
|
if (test_type == PLAIN_TABLE_SEMI_FIXED_PREFIX ||
|
|
|
|
test_type == PLAIN_TABLE_FULL_STR_PREFIX) {
|
2013-12-20 18:35:24 +01:00
|
|
|
// Plain table doesn't use restart index or compression.
|
|
|
|
TestArgs one_arg;
|
2014-01-24 20:09:04 +01:00
|
|
|
one_arg.type = test_type;
|
|
|
|
one_arg.reverse_compare = reverse_compare;
|
|
|
|
one_arg.restart_interval = restart_intervals[0];
|
2015-01-15 01:24:24 +01:00
|
|
|
one_arg.compression = compression_types[0].first;
|
2014-01-24 20:09:04 +01:00
|
|
|
test_args.push_back(one_arg);
|
2013-12-20 18:35:24 +01:00
|
|
|
continue;
|
|
|
|
}
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2012-06-28 08:41:33 +02:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
for (auto restart_interval : restart_intervals) {
|
|
|
|
for (auto compression_type : compression_types) {
|
2013-12-20 18:35:24 +01:00
|
|
|
TestArgs one_arg;
|
2014-01-24 20:09:04 +01:00
|
|
|
one_arg.type = test_type;
|
|
|
|
one_arg.reverse_compare = reverse_compare;
|
|
|
|
one_arg.restart_interval = restart_interval;
|
2015-01-15 01:24:24 +01:00
|
|
|
one_arg.compression = compression_type.first;
|
|
|
|
one_arg.format_version = compression_type.second ? 2 : 1;
|
2014-01-24 20:09:04 +01:00
|
|
|
test_args.push_back(one_arg);
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
|
|
|
return test_args;
|
2012-06-28 08:41:33 +02:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
// In order to make all tests run for plain table format, including
// those operating on empty keys, create a new prefix transformer which
// return fixed prefix if the slice is not shorter than the prefix length,
// and the full slice if it is shorter.
class FixedOrLessPrefixTransform : public SliceTransform {
 private:
  const size_t prefix_len_;

 public:
  explicit FixedOrLessPrefixTransform(size_t prefix_len) :
      prefix_len_(prefix_len) {
  }

  virtual const char* Name() const override { return "rocksdb.FixedPrefix"; }

  // Returns the first prefix_len_ bytes of src, or src itself when it is
  // shorter than prefix_len_ (this is what lets empty keys through).
  virtual Slice Transform(const Slice& src) const override {
    assert(InDomain(src));
    if (src.size() < prefix_len_) {
      return src;
    }
    return Slice(src.data(), prefix_len_);
  }

  // Every slice has a prefix under this transform.
  virtual bool InDomain(const Slice& src) const override { return true; }

  // A slice is a possible output only if it is no longer than the prefix.
  virtual bool InRange(const Slice& dst) const override {
    return (dst.size() <= prefix_len_);
  }
};
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Generic test harness: builds a data source through one of the
// Constructor implementations (table, block, memtable, DB) according to
// TestArgs, then cross-checks the source's iterator against a model KVMap
// with forward, backward, and randomized scans.
class HarnessTest : public testing::Test {
 public:
  HarnessTest()
      : ioptions_(options_),
        constructor_(nullptr),
        write_buffer_(options_.db_write_buffer_size) {}

  // Configures options_/table_options_ and picks the Constructor for the
  // given test-matrix point. May be called repeatedly; the previous
  // constructor is destroyed first.
  void Init(const TestArgs& args) {
    delete constructor_;
    constructor_ = nullptr;
    options_ = Options();
    options_.compression = args.compression;
    // Use shorter block size for tests to exercise block boundary
    // conditions more.
    if (args.reverse_compare) {
      options_.comparator = &reverse_key_comparator;
    }

    internal_comparator_.reset(
        new test::PlainInternalKeyComparator(options_.comparator));

    // Defaults; individual cases below turn these off for plain table,
    // which supports neither Prev() nor arbitrary Seek().
    support_prev_ = true;
    only_support_prefix_seek_ = false;
    switch (args.type) {
      case BLOCK_BASED_TABLE_TEST:
        table_options_.flush_block_policy_factory.reset(
            new FlushBlockBySizePolicyFactory());
        table_options_.block_size = 256;
        table_options_.block_restart_interval = args.restart_interval;
        table_options_.format_version = args.format_version;
        options_.table_factory.reset(
            new BlockBasedTableFactory(table_options_));
        constructor_ = new TableConstructor(options_.comparator);
        break;
// Plain table is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
      case PLAIN_TABLE_SEMI_FIXED_PREFIX:
        support_prev_ = false;
        only_support_prefix_seek_ = true;
        options_.prefix_extractor.reset(new FixedOrLessPrefixTransform(2));
        options_.allow_mmap_reads = true;
        options_.table_factory.reset(NewPlainTableFactory());
        constructor_ = new TableConstructor(options_.comparator, true);
        internal_comparator_.reset(
            new InternalKeyComparator(options_.comparator));
        break;
      case PLAIN_TABLE_FULL_STR_PREFIX:
        support_prev_ = false;
        only_support_prefix_seek_ = true;
        options_.prefix_extractor.reset(NewNoopTransform());
        options_.allow_mmap_reads = true;
        options_.table_factory.reset(NewPlainTableFactory());
        constructor_ = new TableConstructor(options_.comparator, true);
        internal_comparator_.reset(
            new InternalKeyComparator(options_.comparator));
        break;
      case PLAIN_TABLE_TOTAL_ORDER:
        support_prev_ = false;
        only_support_prefix_seek_ = false;
        options_.prefix_extractor = nullptr;
        options_.allow_mmap_reads = true;

        {
          // Variable-length keys, no bloom filter, pure binary search
          // (hash_table_ratio == 0) -> total order seek mode.
          PlainTableOptions plain_table_options;
          plain_table_options.user_key_len = kPlainTableVariableLength;
          plain_table_options.bloom_bits_per_key = 0;
          plain_table_options.hash_table_ratio = 0;

          options_.table_factory.reset(
              NewPlainTableFactory(plain_table_options));
        }
        constructor_ = new TableConstructor(options_.comparator, true);
        internal_comparator_.reset(
            new InternalKeyComparator(options_.comparator));
        break;
#endif  // !ROCKSDB_LITE
      case BLOCK_TEST:
        table_options_.block_size = 256;
        options_.table_factory.reset(
            new BlockBasedTableFactory(table_options_));
        constructor_ = new BlockConstructor(options_.comparator);
        break;
      case MEMTABLE_TEST:
        table_options_.block_size = 256;
        options_.table_factory.reset(
            new BlockBasedTableFactory(table_options_));
        constructor_ = new MemTableConstructor(options_.comparator,
                                               &write_buffer_);
        break;
      case DB_TEST:
        table_options_.block_size = 256;
        options_.table_factory.reset(
            new BlockBasedTableFactory(table_options_));
        constructor_ = new DBConstructor(options_.comparator);
        break;
    }
    ioptions_ = ImmutableCFOptions(options_);
  }

  ~HarnessTest() { delete constructor_; }

  void Add(const std::string& key, const std::string& value) {
    constructor_->Add(key, value);
  }

  // Finalizes the data source, then runs all applicable scan checks.
  void Test(Random* rnd) {
    std::vector<std::string> keys;
    KVMap data;
    constructor_->Finish(options_, ioptions_, table_options_,
                         *internal_comparator_, &keys, &data);

    TestForwardScan(keys, data);
    if (support_prev_) {
      TestBackwardScan(keys, data);
    }
    TestRandomAccess(rnd, keys, data);
  }

  // Walks the iterator front-to-back and checks each entry against the
  // model map.
  void TestForwardScan(const std::vector<std::string>& keys,
                       const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end();
         ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    // Arena-allocated iterators are destroyed in place; heap ones deleted.
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~Iterator();
    } else {
      delete iter;
    }
  }

  // Walks the iterator back-to-front and checks each entry against the
  // model map.
  void TestBackwardScan(const std::vector<std::string>& keys,
                        const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToLast();
    for (KVMap::const_reverse_iterator model_iter = data.rbegin();
         model_iter != data.rend();
         ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Prev();
    }
    ASSERT_TRUE(!iter->Valid());
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~Iterator();
    } else {
      delete iter;
    }
  }

  // Performs 200 random iterator operations, advancing a model iterator in
  // lockstep and asserting agreement after every step. Prev/SeekToLast are
  // only exercised when support_prev_ is set.
  void TestRandomAccess(Random* rnd,
                        const std::vector<std::string>& keys,
                        const KVMap& data) {
    static const bool kVerbose = false;
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      const int toss = rnd->Uniform(support_prev_ ? 5 : 3);
      switch (toss) {
        case 0: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 2: {
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
                                EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 3: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();   // Wrap around to invalid value
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 4: {
          if (kVerbose) fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~Iterator();
    } else {
      delete iter;
    }
  }

  // Renders a model-map position as "'key->value'" or "END" for comparison
  // against the real iterator.
  std::string ToString(const KVMap& data, const KVMap::const_iterator& it) {
    if (it == data.end()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const KVMap& data,
                       const KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const Iterator* it) {
    if (!it->Valid()) {
      return "END";
    } else {
      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
    }
  }

  // Picks a seek target: an existing key, something slightly smaller, or
  // something slightly larger. The "smaller" mutation is skipped when it
  // would change the key's prefix under prefix-seek-only sources.
  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";
    } else {
      const int index = rnd->Uniform(static_cast<int>(keys.size()));
      std::string result = keys[index];
      switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
        case 0:
          // Return an existing key
          break;
        case 1: {
          // Attempt to return something smaller than an existing key
          if (result.size() > 0 && result[result.size() - 1] > '\0'
              && (!only_support_prefix_seek_
                  || options_.prefix_extractor->Transform(result).size()
                  < result.size())) {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }

  // Returns nullptr if not running against a DB
  DB* db() const { return constructor_->db(); }

 private:
  Options options_ = Options();
  ImmutableCFOptions ioptions_;
  BlockBasedTableOptions table_options_ = BlockBasedTableOptions();
  Constructor* constructor_;
  WriteBuffer write_buffer_;
  bool support_prev_;
  bool only_support_prefix_seek_;
  shared_ptr<InternalKeyComparator> internal_comparator_;
};
|
|
|
|
|
|
|
|
// Checks that val lies in the inclusive range [low, high], printing a
// diagnostic to stderr when it does not.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  const bool in_range = (val >= low) && (val <= high);
  if (!in_range) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            static_cast<unsigned long long>(val),
            static_cast<unsigned long long>(low),
            static_cast<unsigned long long>(high));
  }
  return in_range;
}
|
|
|
|
|
2014-01-24 21:14:08 +01:00
|
|
|
// Tests against all kinds of tables
class TableTest : public testing::Test {
 public:
  // Lazily builds and caches a PlainInternalKeyComparator wrapping comp.
  // The reference stays valid for the lifetime of the fixture.
  const InternalKeyComparator& GetPlainInternalComparator(
      const Comparator* comp) {
    if (!plain_internal_comparator) {
      plain_internal_comparator.reset(
          new test::PlainInternalKeyComparator(comp));
    }
    return *plain_internal_comparator;
  }

 private:
  std::unique_ptr<InternalKeyComparator> plain_internal_comparator;
};
|
|
|
|
|
|
|
|
// Empty fixture subclasses used only to group gtest cases by table kind.
class GeneralTableTest : public TableTest {};
class BlockBasedTableTest : public TableTest {};
class PlainTableTest : public TableTest {};
class TablePropertyTest : public testing::Test {};
|
2014-02-12 22:14:59 +01:00
|
|
|
|
|
|
|
// This test serves as the living tutorial for the prefix scan of user collected
// properties.
TEST_F(TablePropertyTest, PrefixScanTest) {
  UserCollectedProperties props{{"num.111.1", "1"},
                                {"num.111.2", "2"},
                                {"num.111.3", "3"},
                                {"num.333.1", "1"},
                                {"num.333.2", "2"},
                                {"num.333.3", "3"},
                                {"num.555.1", "1"},
                                {"num.555.2", "2"},
                                {"num.555.3", "3"}, };

  // prefixes that exist
  for (const std::string& prefix : {"num.111", "num.333", "num.555"}) {
    int num = 0;
    // lower_bound positions at the first key >= prefix; keep scanning while
    // the key still starts with the prefix.
    for (auto pos = props.lower_bound(prefix);
         pos != props.end() &&
             pos->first.compare(0, prefix.size(), prefix) == 0;
         ++pos) {
      ++num;
      auto key = prefix + "." + ToString(num);
      ASSERT_EQ(key, pos->first);
      ASSERT_EQ(ToString(num), pos->second);
    }
    ASSERT_EQ(3, num);
  }

  // prefixes that don't exist
  for (const std::string& prefix :
       {"num.000", "num.222", "num.444", "num.666"}) {
    auto pos = props.lower_bound(prefix);
    // Either past the end or positioned at a key with a different prefix.
    ASSERT_TRUE(pos == props.end() ||
                pos->first.compare(0, prefix.size(), prefix) != 0);
  }
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// This test include all the basic checks except those for index size and block
// size, which will be conducted in separated unit tests.
TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) {
  TableConstructor c(BytewiseComparator());

  c.Add("a1", "val1");
  c.Add("b2", "val2");
  c.Add("c3", "val3");
  c.Add("d4", "val4");
  c.Add("e5", "val5");
  c.Add("f6", "val6");
  c.Add("g7", "val7");
  c.Add("h8", "val8");
  c.Add("j9", "val9");

  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);

  auto& props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ(kvmap.size(), props.num_entries);

  // Every key is 2 bytes of user key (plus internal-key overhead accounted
  // elsewhere) and every value is 4 bytes ("valN").
  auto raw_key_size = kvmap.size() * 2ul;
  auto raw_value_size = kvmap.size() * 4ul;

  ASSERT_EQ(raw_key_size, props.raw_key_size);
  ASSERT_EQ(raw_value_size, props.raw_value_size);
  ASSERT_EQ(1ul, props.num_data_blocks);
  ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used

  // Verify data size.
  BlockBuilder block_builder(1);
  for (const auto& item : kvmap) {
    block_builder.Add(item.first, item.second);
  }
  Slice content = block_builder.Finish();
  // data_size covers the block payload plus its trailer.
  ASSERT_EQ(content.size() + kBlockTrailerSize, props.data_size);
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Verifies that the filter policy's name is recorded in table properties
// when a bloom filter is configured.
TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) {
  TableConstructor c(BytewiseComparator(), true);
  c.Add("a1", "val1");
  std::vector<std::string> keys;
  KVMap kvmap;
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  auto& props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name);
}
|
|
|
|
|
2015-03-03 02:07:03 +01:00
|
|
|
//
|
|
|
|
// BlockBasedTableTest::PrefetchTest
|
|
|
|
//
|
|
|
|
// Asserts that each key in keys_in_cache has its block present in the
// reader's block cache, and each key in keys_not_in_cache does not.
void AssertKeysInCache(BlockBasedTable* table_reader,
                       const vector<string>& keys_in_cache,
                       const vector<string>& keys_not_in_cache) {
  // Iterate by const reference: iterating by value copied every
  // std::string on each pass.
  for (const auto& key : keys_in_cache) {
    ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
  }

  for (const auto& key : keys_not_in_cache) {
    ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
  }
}
|
|
|
|
|
|
|
|
// Reopens the table with a fresh block cache, issues Prefetch() over
// [key_begin, key_end] (a nullptr bound means "open ended"), verifies the
// returned status code, and checks exactly which keys ended up cached.
void PrefetchRange(TableConstructor* c, Options* opt,
                   BlockBasedTableOptions* table_options,
                   const vector<std::string>& keys,
                   const char* key_begin, const char* key_end,
                   const vector<string>& keys_in_cache,
                   const vector<string>& keys_not_in_cache,
                   const Status expected_status = Status::OK()) {
  // Start every call from an empty cache so earlier prefetches do not
  // pollute the result.
  table_options->block_cache = NewLRUCache(16 * 1024 * 1024);
  opt->table_factory.reset(NewBlockBasedTableFactory(*table_options));
  const ImmutableCFOptions ioptions2(*opt);
  ASSERT_OK(c->Reopen(ioptions2));

  auto* table_reader = dynamic_cast<BlockBasedTable*>(c->GetTableReader());
  // Substitute an empty string for a null bound so the Slice can be built
  // without crashing; the Slice is only handed to Prefetch() when the
  // corresponding bound is actually non-null.
  Slice lower(key_begin ? key_begin : "");
  Slice upper(key_end ? key_end : "");
  Status prefetch_status = table_reader->Prefetch(
      key_begin ? &lower : nullptr, key_end ? &upper : nullptr);
  ASSERT_TRUE(prefetch_status.code() == expected_status.code());

  // Confirm the cache warm-up matches expectations.
  AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache);
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, PrefetchTest) {
  // The purpose of this test is to test the prefetching operation built into
  // BlockBasedTable.
  Options opt;
  unique_ptr<InternalKeyComparator> ikc;
  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
  // Disable compression so block boundaries follow the raw value sizes.
  opt.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  // big enough so we don't ever lose cached values.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

  // Values are sized to force the block layout sketched below.
  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  KVMap kvmap;
  const ImmutableCFOptions ioptions(opt);
  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);

  // We get the following data spread :
  //
  //  Data block              Index
  //  ========================
  //  [ k01 k02 k03 ]         k03
  //  [ k04 ]                 k04
  //  [ k05 ]                 k05
  //  [ k06 k07 ]             k07
  //
  // Each PrefetchRange call below reopens the table with a fresh cache,
  // prefetches the given range, and asserts exactly which keys are cached.

  // Simple
  PrefetchRange(&c, &opt, &table_options, keys,
                /*key_range=*/ "k01", "k05",
                /*keys_in_cache=*/ {"k01", "k02", "k03", "k04", "k05"},
                /*keys_not_in_cache=*/ {"k06", "k07"});
  PrefetchRange(&c, &opt, &table_options, keys,
                "k01", "k01",
                {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // odd
  PrefetchRange(&c, &opt, &table_options, keys,
                "a", "z",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"},
                {});
  PrefetchRange(&c, &opt, &table_options, keys,
                "k00", "k00",
                {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // Edge cases
  PrefetchRange(&c, &opt, &table_options, keys,
                "k00", "k06",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"},
                {});
  PrefetchRange(&c, &opt, &table_options, keys,
                "k00", "zzz",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"},
                {});
  // null keys
  PrefetchRange(&c, &opt, &table_options, keys,
                nullptr, nullptr,
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"},
                {});
  PrefetchRange(&c, &opt, &table_options, keys,
                "k04", nullptr,
                {"k04", "k05", "k06", "k07"},
                {"k01", "k02", "k03"});
  PrefetchRange(&c, &opt, &table_options, keys,
                nullptr, "k05",
                {"k01", "k02", "k03", "k04", "k05"},
                {"k06", "k07"});
  // invalid
  // begin > end must be rejected; only the status *code* is compared, so the
  // slices passed to InvalidArgument here are arbitrary.
  PrefetchRange(&c, &opt, &table_options, keys,
                "k06", "k00", {}, {},
                Status::InvalidArgument(Slice("k06 "), Slice("k07")));
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Verifies that total-order seeks behave like binary-search seeks even when
// the table was built with a hash index (with/without collisions allowed,
// with/without a filter policy).
TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
  BlockBasedTableOptions table_options;
  for (int i = 0; i < 4; ++i) {
    Options options;
    // Make each key/value an individual block
    table_options.block_size = 64;
    switch (i) {
    case 0:
      // Binary search index
      table_options.index_type = BlockBasedTableOptions::kBinarySearch;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      break;
    case 1:
      // Hash search index
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    case 2:
      // Hash search index with hash_index_allow_collision
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      table_options.hash_index_allow_collision = true;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    case 3:
    default:
      // Hash search index with filter policy
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    }

    TableConstructor c(BytewiseComparator(), true);
    // BUGFIX: the original called std::string('a', 56), which swaps the
    // (count, char) constructor arguments and builds 97 copies of '8'.
    // The intent — a 56-byte filler value so each key/value pair exceeds
    // the 64-byte block_size and lands in its own block — is preserved.
    c.Add("aaaa1", std::string(56, 'a'));
    c.Add("bbaa1", std::string(56, 'a'));
    c.Add("cccc1", std::string(56, 'a'));
    c.Add("bbbb1", std::string(56, 'a'));
    c.Add("baaa1", std::string(56, 'a'));
    c.Add("abbb1", std::string(56, 'a'));
    c.Add("cccc2", std::string(56, 'a'));
    std::vector<std::string> keys;
    KVMap kvmap;
    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
    auto props = c.GetTableReader()->GetTableProperties();
    // One data block per key/value pair.
    ASSERT_EQ(7u, props->num_data_blocks);
    auto* reader = c.GetTableReader();
    ReadOptions ro;
    // Force total-order iteration regardless of the index type.
    ro.total_order_seek = true;
    std::unique_ptr<Iterator> iter(reader->NewIterator(ro));

    // Seeking to "b" must land on the smallest key >= "b".
    iter->Seek(InternalKey("b", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("baaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bbb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("cccc1", ExtractUserKey(iter->key()).ToString());
  }
}
|
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// Returns a random string of length |len| drawn from |rnd|.
static std::string RandomString(Random* rnd, int len) {
  std::string result;
  test::RandomString(rnd, len, &result);
  return result;
}
|
|
|
|
|
2014-09-26 18:14:05 +02:00
|
|
|
void AddInternalKey(TableConstructor* c, const std::string& prefix,
|
2014-04-10 23:19:43 +02:00
|
|
|
int suffix_len = 800) {
|
|
|
|
static Random rnd(1023);
|
|
|
|
InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
|
|
|
|
c->Add(k.Encode().ToString(), "v");
|
|
|
|
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Exercises hash-index seeks: lower/upper bound behavior for prefixes that
// exist, keys that exist, keys past the last entry of a prefix, and
// prefixes that do not exist at all.
TEST_F(TableTest, HashIndexTest) {
  TableConstructor c(BytewiseComparator());

  // keys with prefix length 3, make sure the key/value is big enough to fill
  // one block
  AddInternalKey(&c, "0015");
  AddInternalKey(&c, "0035");

  AddInternalKey(&c, "0054");
  AddInternalKey(&c, "0055");

  AddInternalKey(&c, "0056");
  AddInternalKey(&c, "0057");

  AddInternalKey(&c, "0058");
  AddInternalKey(&c, "0075");

  AddInternalKey(&c, "0076");
  AddInternalKey(&c, "0095");

  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  // Prefix length 3 groups the keys above into prefixes 001/003/005/007/009.
  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  table_options.hash_index_allow_collision = true;
  // block_size 1700 with ~800-byte keys yields roughly two keys per block.
  table_options.block_size = 1700;
  table_options.block_cache = NewLRUCache(1024);
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  std::unique_ptr<InternalKeyComparator> comparator(
      new InternalKeyComparator(BytewiseComparator()));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
  auto reader = c.GetTableReader();

  auto props = reader->GetTableProperties();
  ASSERT_EQ(5u, props->num_data_blocks);

  std::unique_ptr<Iterator> hash_iter(reader->NewIterator(ReadOptions()));

  // -- Find keys do not exist, but have common prefix.
  std::vector<std::string> prefixes = {"001", "003", "005", "007", "009"};
  std::vector<std::string> lower_bound = {keys[0], keys[1], keys[2],
                                          keys[7], keys[9], };

  // find the lower bound of the prefix
  for (size_t i = 0; i < prefixes.size(); ++i) {
    hash_iter->Seek(InternalKey(prefixes[i], 0, kTypeValue).Encode());
    ASSERT_OK(hash_iter->status());
    ASSERT_TRUE(hash_iter->Valid());

    // seek the first element in the block
    ASSERT_EQ(lower_bound[i], hash_iter->key().ToString());
    ASSERT_EQ("v", hash_iter->value().ToString());
  }

  // find the upper bound of prefixes
  std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };

  // find existing keys
  for (const auto& item : kvmap) {
    auto ukey = ExtractUserKey(item.first).ToString();
    hash_iter->Seek(ukey);

    // ASSERT_OK(regular_iter->status());
    ASSERT_OK(hash_iter->status());

    // ASSERT_TRUE(regular_iter->Valid());
    ASSERT_TRUE(hash_iter->Valid());

    ASSERT_EQ(item.first, hash_iter->key().ToString());
    ASSERT_EQ(item.second, hash_iter->value().ToString());
  }

  // Seek past the last key of each prefix: expect the first key of the next
  // prefix, or an invalid iterator at the very end of the table.
  for (size_t i = 0; i < prefixes.size(); ++i) {
    // the key is greater than any existing keys.
    auto key = prefixes[i] + "9";
    hash_iter->Seek(InternalKey(key, 0, kTypeValue).Encode());

    ASSERT_OK(hash_iter->status());
    if (i == prefixes.size() - 1) {
      // last key
      ASSERT_TRUE(!hash_iter->Valid());
    } else {
      ASSERT_TRUE(hash_iter->Valid());
      // seek the first element in the block
      ASSERT_EQ(upper_bound[i], hash_iter->key().ToString());
      ASSERT_EQ("v", hash_iter->value().ToString());
    }
  }

  // find keys with prefix that don't match any of the existing prefixes.
  std::vector<std::string> non_exist_prefixes = {"002", "004", "006", "008"};
  for (const auto& prefix : non_exist_prefixes) {
    hash_iter->Seek(InternalKey(prefix, 0, kTypeValue).Encode());
    // regular_iter->Seek(prefix);

    ASSERT_OK(hash_iter->status());
    // Seek to non-existing prefixes should yield either invalid, or a
    // key with prefix greater than the target.
    if (hash_iter->Valid()) {
      Slice ukey = ExtractUserKey(hash_iter->key());
      Slice ukey_prefix = options.prefix_extractor->Transform(ukey);
      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0);
    }
  }
}
|
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// It's very hard to figure out the index block size of a block accurately.
|
|
|
|
// To make sure we get the index size, we just make sure as key number
|
|
|
|
// grows, the filter block size also grows.
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, IndexSizeStat) {
|
2013-10-10 20:43:24 +02:00
|
|
|
uint64_t last_index_size = 0;
|
|
|
|
|
|
|
|
// we need to use random keys since the pure human readable texts
|
|
|
|
// may be well compressed, resulting insignifcant change of index
|
|
|
|
// block size.
|
|
|
|
Random rnd(test::RandomSeed());
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
|
|
|
|
for (int i = 0; i < 100; ++i) {
|
|
|
|
keys.push_back(RandomString(&rnd, 10000));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Each time we load one more key to the table. the table index block
|
|
|
|
// size is expected to be larger than last time's.
|
|
|
|
for (size_t i = 1; i < keys.size(); ++i) {
|
2013-12-20 18:35:24 +01:00
|
|
|
TableConstructor c(BytewiseComparator());
|
2013-10-10 20:43:24 +02:00
|
|
|
for (size_t j = 0; j < i; ++j) {
|
|
|
|
c.Add(keys[j], "val");
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<std::string> ks;
|
|
|
|
KVMap kvmap;
|
2013-11-20 07:00:48 +01:00
|
|
|
Options options;
|
2013-10-10 20:43:24 +02:00
|
|
|
options.compression = kNoCompression;
|
2014-08-25 23:22:05 +02:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_restart_interval = 1;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2013-10-10 20:43:24 +02:00
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 23:22:05 +02:00
|
|
|
GetPlainInternalComparator(options.comparator), &ks, &kvmap);
|
2014-08-26 01:14:30 +02:00
|
|
|
auto index_size = c.GetTableReader()->GetTableProperties()->index_size;
|
2013-10-10 20:43:24 +02:00
|
|
|
ASSERT_GT(index_size, last_index_size);
|
|
|
|
last_index_size = index_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Each key/value pair is sized to roughly fill one block, so the number of
// data blocks recorded in the table properties should equal the number of
// entries written.
TEST_F(BlockBasedTableTest, NumBlockStat) {
  Random rnd(test::RandomSeed());
  TableConstructor c(BytewiseComparator());

  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  table_options.block_size = 1000;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  // Keys slightly smaller than the block size so that each block holds
  // roughly one key/value pair.
  for (int entry = 0; entry < 10; ++entry) {
    c.Add(RandomString(&rnd, 900), "val");
  }

  std::vector<std::string> ks;
  KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &ks, &kvmap);
  ASSERT_EQ(kvmap.size(),
            c.GetTableReader()->GetTableProperties()->num_data_blocks);
}
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
// A simple tool that takes the snapshot of block cache statistics.
|
|
|
|
class BlockCachePropertiesSnapshot {
|
2013-11-13 07:46:51 +01:00
|
|
|
public:
|
2014-02-20 00:38:57 +01:00
|
|
|
explicit BlockCachePropertiesSnapshot(Statistics* statistics) {
|
2014-01-17 21:46:06 +01:00
|
|
|
block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS);
|
|
|
|
block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT);
|
|
|
|
index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
|
|
|
|
index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
|
|
|
|
data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
|
|
|
|
data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
|
2014-02-20 00:38:57 +01:00
|
|
|
filter_block_cache_miss =
|
|
|
|
statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS);
|
|
|
|
filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
|
|
|
|
}
|
|
|
|
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
|
|
|
|
int64_t expected_index_block_cache_hit) {
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
|
|
|
|
int64_t expected_filter_block_cache_hit) {
|
|
|
|
ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
2013-11-20 01:29:42 +01:00
|
|
|
// Check if the fetched props matches the expected ones.
|
2014-02-20 00:38:57 +01:00
|
|
|
// TODO(kailiu) Use this only when you disabled filter policy!
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertEqual(int64_t expected_index_block_cache_miss,
|
|
|
|
int64_t expected_index_block_cache_hit,
|
|
|
|
int64_t expected_data_block_cache_miss,
|
|
|
|
int64_t expected_data_block_cache_hit) const {
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
|
|
|
|
ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
|
|
|
|
block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
|
|
|
|
block_cache_hit);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2014-02-05 01:21:47 +01:00
|
|
|
int64_t block_cache_miss = 0;
|
|
|
|
int64_t block_cache_hit = 0;
|
|
|
|
int64_t index_block_cache_miss = 0;
|
|
|
|
int64_t index_block_cache_hit = 0;
|
|
|
|
int64_t data_block_cache_miss = 0;
|
|
|
|
int64_t data_block_cache_hit = 0;
|
2014-02-20 00:38:57 +01:00
|
|
|
int64_t filter_block_cache_miss = 0;
|
|
|
|
int64_t filter_block_cache_hit = 0;
|
2013-11-13 07:46:51 +01:00
|
|
|
};
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
// Make sure, by default, index/filter blocks were pre-loaded (meaning we won't
// use block cache to store them).
TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) {
  Options options;
  options.create_if_missing = true;
  options.statistics = CreateDBStatistics();
  BlockBasedTableOptions table_options;
  // A block cache is configured, but cache_index_and_filter_blocks is left
  // at its default (false), so index/filter blocks should never touch it.
  table_options.block_cache = NewLRUCache(1024);
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  std::vector<std::string> keys;
  KVMap kvmap;

  TableConstructor c(BytewiseComparator(), true);
  c.Add("key", "value");
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);

  // preloading filter/index blocks is enabled.
  auto reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  ASSERT_TRUE(reader->TEST_filter_block_preloaded());
  ASSERT_TRUE(reader->TEST_index_reader_preloaded());

  {
    // nothing happens in the beginning
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertIndexBlockStat(0, 0);
    props.AssertFilterBlockStat(0, 0);
  }

  {
    GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
                           GetContext::kNotFound, Slice(), nullptr, nullptr,
                           nullptr, nullptr);
    // a hack that just to trigger BlockBasedTable::GetFilter.
    reader->Get(ReadOptions(), "non-exist-key", &get_context);
    // Even after a lookup, no index/filter block cache traffic is expected,
    // because both were preloaded at open time.
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertIndexBlockStat(0, 0);
    props.AssertFilterBlockStat(0, 0);
  }
}
|
|
|
|
|
|
|
|
// Due to the difficulties of the interaction between statistics, this test
// only tests the case when "index block is put to block cache"
TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
  // -- Table construction
  Options options;
  options.create_if_missing = true;
  options.statistics = CreateDBStatistics();

  // Enable the cache for index/filter blocks
  BlockBasedTableOptions table_options;
  table_options.block_cache = NewLRUCache(1024);
  table_options.cache_index_and_filter_blocks = true;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  std::vector<std::string> keys;
  KVMap kvmap;

  TableConstructor c(BytewiseComparator());
  c.Add("key", "value");
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  // preloading filter/index blocks is prohibited.
  auto* reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
  ASSERT_TRUE(!reader->TEST_index_reader_preloaded());

  // -- PART 1: Open with regular block cache.
  // Since block_cache is disabled, no cache activities will be involved.
  unique_ptr<Iterator> iter;

  // At first, no block will be accessed.
  {
    BlockCachePropertiesSnapshot props(options.statistics.get());
    // index will be added to block cache.
    props.AssertEqual(1,  // index block miss
                      0, 0, 0);
  }

  // Only index block will be accessed
  {
    iter.reset(c.NewIterator());
    BlockCachePropertiesSnapshot props(options.statistics.get());
    // NOTE: to help better highlight the "delta" of each ticker, I use
    // <last_value> + <added_value> to indicate the increment of changed
    // value; other numbers remain the same.
    props.AssertEqual(1, 0 + 1,  // index block hit
                      0, 0);
  }

  // Only data block will be accessed
  {
    iter->SeekToFirst();
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertEqual(1, 1, 0 + 1,  // data block miss
                      0);
  }

  // Data block will be in cache
  {
    iter.reset(c.NewIterator());
    iter->SeekToFirst();
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertEqual(1, 1 + 1, /* index block hit */
                      1, 0 + 1 /* data block hit */);
  }
  // release the iterator so that the block cache can reset correctly.
  iter.reset();

  // -- PART 2: Open with very small block cache
  // In this test, no block will ever get hit since the block cache is
  // too small to fit even one entry.
  table_options.block_cache = NewLRUCache(1);
  options.statistics = CreateDBStatistics();
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  const ImmutableCFOptions ioptions2(options);
  c.Reopen(ioptions2);
  {
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertEqual(1,  // index block miss
                      0, 0, 0);
  }

  {
    // Both index and data block get accessed.
    // It first cache index block then data block. But since the cache size
    // is only 1, index block will be purged after data block is inserted.
    iter.reset(c.NewIterator());
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertEqual(1 + 1,  // index block miss
                      0, 0,  // data block miss
                      0);
  }

  {
    // SeekToFirst() accesses data block. With similar reason, we expect data
    // block's cache miss.
    iter->SeekToFirst();
    BlockCachePropertiesSnapshot props(options.statistics.get());
    props.AssertEqual(2, 0, 0 + 1,  // data block miss
                      0);
  }
  iter.reset();

  // -- PART 3: Open table with bloom filter enabled but not in SST file
  table_options.block_cache = NewLRUCache(4096);
  table_options.cache_index_and_filter_blocks = false;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  TableConstructor c3(BytewiseComparator());
  std::string user_key = "k01";
  InternalKey internal_key(user_key, 0, kTypeValue);
  c3.Add(internal_key.Encode().ToString(), "hello");
  ImmutableCFOptions ioptions3(options);
  // Generate table without filter policy
  c3.Finish(options, ioptions3, table_options,
            GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  // Open table with filter policy
  table_options.filter_policy.reset(NewBloomFilterPolicy(1));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  options.statistics = CreateDBStatistics();
  ImmutableCFOptions ioptions4(options);
  ASSERT_OK(c3.Reopen(ioptions4));
  reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader());
  ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
  std::string value;
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
                         GetContext::kNotFound, user_key, &value, nullptr,
                         nullptr, nullptr);
  // The lookup must succeed despite the SST containing no filter block,
  // and must not touch the filter-block cache tickers.
  ASSERT_OK(reader->Get(ReadOptions(), user_key, &get_context));
  ASSERT_EQ(value, "hello");
  BlockCachePropertiesSnapshot props(options.statistics.get());
  props.AssertFilterBlockStat(0, 0);
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, BlockCacheLeak) {
  // Check that when we reopen a table we don't lose access to blocks already
  // in the cache. This test checks whether the Table actually makes use of the
  // unique ID from the file.

  Options opt;
  unique_ptr<InternalKeyComparator> ikc;
  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
  opt.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  // big enough so we don't ever lose cached values.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  KVMap kvmap;
  const ImmutableCFOptions ioptions(opt);
  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);

  // Scan the whole table once so every data block lands in the cache.
  unique_ptr<Iterator> iter(c.NewIterator());
  iter->SeekToFirst();
  while (iter->Valid()) {
    iter->key();
    iter->value();
    iter->Next();
  }
  ASSERT_OK(iter->status());

  // Reopen with the SAME block cache: all keys must still be cached.
  const ImmutableCFOptions ioptions1(opt);
  ASSERT_OK(c.Reopen(ioptions1));
  auto table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  for (const std::string& key : keys) {
    ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
  }

  // rerun with different block cache
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
  const ImmutableCFOptions ioptions2(opt);
  ASSERT_OK(c.Reopen(ioptions2));
  table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  for (const std::string& key : keys) {
    ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
  }
}
|
|
|
|
|
2015-07-20 20:09:14 +02:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(PlainTableTest, BasicPlainTableProperties) {
  // Build a small plain-table file in memory and verify that the table
  // properties read back from it match what was written.
  PlainTableOptions plain_table_options;
  plain_table_options.user_key_len = 8;
  plain_table_options.bloom_bits_per_key = 8;
  plain_table_options.hash_table_ratio = 0;

  PlainTableFactory factory(plain_table_options);
  // The sink is owned by the writer; it is recovered later via
  // writable_file() to read the bytes back.  (A previously-declared
  // standalone StringSink local was unused and has been removed.)
  unique_ptr<WritableFileWriter> file_writer(
      test::GetWritableFileWriter(new test::StringSink()));
  Options options;
  const ImmutableCFOptions ioptions(options);
  InternalKeyComparator ikc(options.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
      int_tbl_prop_collector_factories;
  std::unique_ptr<TableBuilder> builder(factory.NewTableBuilder(
      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                          kNoCompression, CompressionOptions(), false),
      file_writer.get()));

  for (char c = 'a'; c <= 'z'; ++c) {
    // 8-byte user key plus an 8-byte trailer: PlainTable expects keys in
    // internal-key format, so every key is 16 bytes total (this is what
    // the raw_key_size assertion below depends on).
    std::string key(8, c);
    key.append("\1       ");  // PlainTable expects internal key structure
    std::string value(28, c + 42);
    builder->Add(key, value);
  }
  ASSERT_OK(builder->Finish());
  file_writer->Flush();

  test::StringSink* ss =
      static_cast<test::StringSink*>(file_writer->writable_file());
  unique_ptr<RandomAccessFileReader> file_reader(
      test::GetRandomAccessFileReader(
          new test::StringSource(ss->contents(), 72242, true)));

  TableProperties* props = nullptr;
  auto s = ReadTableProperties(file_reader.get(), ss->contents().size(),
                               kPlainTableMagicNumber, Env::Default(), nullptr,
                               &props);
  std::unique_ptr<TableProperties> props_guard(props);
  ASSERT_OK(s);

  // Plain table has neither a block index nor a separately stored filter
  // block, so both sizes are reported as zero.
  ASSERT_EQ(0ul, props->index_size);
  ASSERT_EQ(0ul, props->filter_size);
  ASSERT_EQ(16ul * 26, props->raw_key_size);
  ASSERT_EQ(28ul * 26, props->raw_value_size);
  ASSERT_EQ(26ul, props->num_entries);
  ASSERT_EQ(1ul, props->num_data_blocks);
}
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2014-01-24 21:14:08 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) {
  // Build an uncompressed table mixing tiny and very large values, then
  // check that ApproximateOffsetOf() lands near the expected byte offset
  // of each key.
  Options options;
  options.compression = kNoCompression;
  test::PlainInternalKeyComparator internal_comparator(options.comparator);
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;

  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));

  std::vector<std::string> keys;
  KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, internal_comparator,
           &keys, &kvmap);

  // Everything up to and including k03 is still near the start of the file.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
  // Each later key's offset grows by the sizes of the values before it.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
  // Past the last key we get (roughly) the total file size.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
|
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
// Builds a table whose large values compress to roughly a quarter of their
// raw size, and verifies that ApproximateOffsetOf() reflects the compressed
// on-disk layout rather than the raw value sizes.
static void DoCompressionTest(CompressionType comp) {
  Random rnd(301);
  TableConstructor c(BytewiseComparator());
  std::string buffer;
  // Note: the two CompressibleString calls must stay in this order since
  // they both consume state from `rnd`.
  c.Add("k01", "hello");
  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &buffer));
  c.Add("k03", "hello3");
  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &buffer));

  Options options;
  options.compression = comp;
  test::PlainInternalKeyComparator key_cmp(options.comparator);
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;

  std::vector<std::string> keys;
  KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, key_cmp, &keys, &kvmap);

  // Each 10000-byte value compresses to roughly 2000-3000 bytes.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6100));
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
  // Collect every compression algorithm available in this build, then run
  // the offset test against each of them.
  std::vector<CompressionType> compression_state;

  if (Snappy_Supported()) {
    compression_state.push_back(kSnappyCompression);
  } else {
    fprintf(stderr, "skipping snappy compression tests\n");
  }

  if (Zlib_Supported()) {
    compression_state.push_back(kZlibCompression);
  } else {
    fprintf(stderr, "skipping zlib compression tests\n");
  }

  // TODO(kailiu) DoCompressionTest() doesn't work with BZip2.
  /*
  if (!BZip2_Supported()) {
    fprintf(stderr, "skipping bzip2 compression tests\n");
  } else {
    compression_state.push_back(kBZip2Compression);
  }
  */

  if (LZ4_Supported()) {
    compression_state.push_back(kLZ4Compression);
    compression_state.push_back(kLZ4HCCompression);
  } else {
    fprintf(stderr, "skipping lz4 and lz4hc compression tests\n");
  }

  for (auto state : compression_state) {
    DoCompressionTest(state);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, Randomized) {
  // Run the randomized harness over every generated table configuration,
  // with the entry count stepping densely while small (<50) and coarsely
  // afterwards (+200 per round).
  std::vector<TestArgs> args = GenerateArgList();
  for (unsigned int i = 0; i < args.size(); i++) {
    Init(args[i]);
    Random rnd(test::RandomSeed() + 5);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        // Progress report so long runs show some activity on stderr.
        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
                static_cast<int>(args.size()), num_entries);
      }
      for (int j = 0; j < num_entries; j++) {
        std::string value_buf;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            test::RandomString(&rnd, rnd.Skewed(5), &value_buf).ToString());
      }
      Test(&rnd);
    }
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, RandomizedLongDB) {
  // Run the harness in DB mode with enough entries to force compaction,
  // then confirm that table files actually exist at some level.
  Random rnd(test::RandomSeed());
  TestArgs args = { DB_TEST, false, 16, kNoCompression, 0 };
  Init(args);
  const int num_entries = 100000;
  for (int i = 0; i < num_entries; i++) {
    std::string value_buf;
    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
        test::RandomString(&rnd, rnd.Skewed(5), &value_buf).ToString());
  }
  Test(&rnd);

  // We must have created enough data to force merging
  int total_files = 0;
  for (int level = 0; level < db()->NumberLevels(); level++) {
    std::string value;
    char prop_name[100];
    snprintf(prop_name, sizeof(prop_name),
             "rocksdb.num-files-at-level%d", level);
    ASSERT_TRUE(db()->GetProperty(prop_name, &value));
    total_files += atoi(value.c_str());
  }
  ASSERT_GT(total_files, 0);
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Fixture for tests that exercise MemTable directly (no table files involved).
class MemTableTest : public testing::Test {};
|
2013-11-10 10:17:32 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(MemTableTest, Simple) {
  // Insert a write batch into a skip-list memtable and walk the resulting
  // iterator, printing every entry to stderr.
  InternalKeyComparator cmp(BytewiseComparator());
  auto table_factory = std::make_shared<SkipListFactory>();
  Options options;
  options.memtable_factory = table_factory;
  ImmutableCFOptions ioptions(options);
  WriteBuffer wb(options.db_write_buffer_size);
  // MemTable is intrusively ref-counted: Ref() here, Unref() at the end.
  MemTable* memtable =
      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb,
                   kMaxSequenceNumber);
  memtable->Ref();

  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  ColumnFamilyMemTablesDefault cf_mems_default(memtable);
  ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, &cf_mems_default).ok());

  Arena arena;
  ScopedArenaIterator iter(memtable->NewIterator(ReadOptions(), &arena));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    fprintf(stderr, "key: '%s' -> '%s'\n",
            iter->key().ToString().c_str(),
            iter->value().ToString().c_str());
  }

  // Drop our reference; Unref() returns the pointer when the count hits zero.
  delete memtable->Unref();
}
|
|
|
|
|
2013-12-06 01:51:26 +01:00
|
|
|
// Test the empty key
|
2015-03-17 22:08:00 +01:00
|
|
|
// Test the empty key
TEST_F(HarnessTest, SimpleEmptyKey) {
  // A zero-length key must round-trip through every table configuration.
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, SimpleSingle) {
  // A single-entry table must round-trip through every configuration.
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, SimpleMulti) {
  // A few entries with shared prefixes must round-trip through every
  // configuration.
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, SimpleSpecialKey) {
  // A key made entirely of 0xff bytes (the maximal byte value) must
  // round-trip through every configuration.
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
|
2013-11-10 10:17:32 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, FooterTests) {
|
2014-05-01 20:09:32 +02:00
|
|
|
{
|
|
|
|
// upconvert legacy block based
|
|
|
|
std::string encoded;
|
2015-01-13 23:33:04 +01:00
|
|
|
Footer footer(kLegacyBlockBasedTableMagicNumber, 0);
|
2014-05-01 20:09:32 +02:00
|
|
|
BlockHandle meta_index(10, 5), index(20, 15);
|
|
|
|
footer.set_metaindex_handle(meta_index);
|
|
|
|
footer.set_index_handle(index);
|
|
|
|
footer.EncodeTo(&encoded);
|
|
|
|
Footer decoded_footer;
|
|
|
|
Slice encoded_slice(encoded);
|
|
|
|
decoded_footer.DecodeFrom(&encoded_slice);
|
|
|
|
ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
|
|
|
|
ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
|
2015-01-13 23:33:04 +01:00
|
|
|
ASSERT_EQ(decoded_footer.version(), 0U);
|
2014-05-01 20:09:32 +02:00
|
|
|
}
|
|
|
|
{
|
|
|
|
// xxhash block based
|
|
|
|
std::string encoded;
|
2015-01-13 23:33:04 +01:00
|
|
|
Footer footer(kBlockBasedTableMagicNumber, 1);
|
2014-05-01 20:09:32 +02:00
|
|
|
BlockHandle meta_index(10, 5), index(20, 15);
|
|
|
|
footer.set_metaindex_handle(meta_index);
|
|
|
|
footer.set_index_handle(index);
|
|
|
|
footer.set_checksum(kxxHash);
|
|
|
|
footer.EncodeTo(&encoded);
|
|
|
|
Footer decoded_footer;
|
|
|
|
Slice encoded_slice(encoded);
|
|
|
|
decoded_footer.DecodeFrom(&encoded_slice);
|
|
|
|
ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
|
|
|
|
ASSERT_EQ(decoded_footer.checksum(), kxxHash);
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
|
2015-01-13 23:33:04 +01:00
|
|
|
ASSERT_EQ(decoded_footer.version(), 1U);
|
2014-05-01 20:09:32 +02:00
|
|
|
}
|
2015-07-20 20:09:14 +02:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2014-05-01 20:09:32 +02:00
|
|
|
{
|
|
|
|
// upconvert legacy plain table
|
|
|
|
std::string encoded;
|
2015-01-13 23:33:04 +01:00
|
|
|
Footer footer(kLegacyPlainTableMagicNumber, 0);
|
2014-05-01 20:09:32 +02:00
|
|
|
BlockHandle meta_index(10, 5), index(20, 15);
|
|
|
|
footer.set_metaindex_handle(meta_index);
|
|
|
|
footer.set_index_handle(index);
|
|
|
|
footer.EncodeTo(&encoded);
|
|
|
|
Footer decoded_footer;
|
|
|
|
Slice encoded_slice(encoded);
|
|
|
|
decoded_footer.DecodeFrom(&encoded_slice);
|
|
|
|
ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
|
|
|
|
ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
|
2015-01-13 23:33:04 +01:00
|
|
|
ASSERT_EQ(decoded_footer.version(), 0U);
|
2014-05-01 20:09:32 +02:00
|
|
|
}
|
|
|
|
{
|
|
|
|
// xxhash block based
|
|
|
|
std::string encoded;
|
2015-01-13 23:33:04 +01:00
|
|
|
Footer footer(kPlainTableMagicNumber, 1);
|
2014-05-01 20:09:32 +02:00
|
|
|
BlockHandle meta_index(10, 5), index(20, 15);
|
|
|
|
footer.set_metaindex_handle(meta_index);
|
|
|
|
footer.set_index_handle(index);
|
|
|
|
footer.set_checksum(kxxHash);
|
|
|
|
footer.EncodeTo(&encoded);
|
|
|
|
Footer decoded_footer;
|
|
|
|
Slice encoded_slice(encoded);
|
|
|
|
decoded_footer.DecodeFrom(&encoded_slice);
|
|
|
|
ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
|
|
|
|
ASSERT_EQ(decoded_footer.checksum(), kxxHash);
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
|
2015-01-13 23:33:04 +01:00
|
|
|
ASSERT_EQ(decoded_footer.version(), 1U);
|
|
|
|
}
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2015-01-13 23:33:04 +01:00
|
|
|
{
|
|
|
|
// version == 2
|
|
|
|
std::string encoded;
|
|
|
|
Footer footer(kBlockBasedTableMagicNumber, 2);
|
|
|
|
BlockHandle meta_index(10, 5), index(20, 15);
|
|
|
|
footer.set_metaindex_handle(meta_index);
|
|
|
|
footer.set_index_handle(index);
|
|
|
|
footer.EncodeTo(&encoded);
|
|
|
|
Footer decoded_footer;
|
|
|
|
Slice encoded_slice(encoded);
|
|
|
|
decoded_footer.DecodeFrom(&encoded_slice);
|
|
|
|
ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
|
|
|
|
ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
|
|
|
|
ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
|
|
|
|
ASSERT_EQ(decoded_footer.version(), 2U);
|
2014-05-01 20:09:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2015-03-17 22:08:00 +01:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|