2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-16 01:03:42 +02:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 23:59:46 +02:00
|
|
|
//
|
2011-03-18 23:37:00 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
2014-02-05 01:21:47 +01:00
|
|
|
|
|
|
|
#include <stdio.h>
|
2014-03-01 03:19:07 +01:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
#include <algorithm>
|
2014-06-13 04:03:22 +02:00
|
|
|
#include <iostream>
|
2011-03-18 23:37:00 +01:00
|
|
|
#include <map>
|
2013-07-23 23:42:27 +02:00
|
|
|
#include <memory>
|
2015-09-02 22:58:22 +02:00
|
|
|
#include <string>
|
2013-10-10 20:43:24 +02:00
|
|
|
#include <vector>
|
|
|
|
|
2017-05-06 05:10:56 +02:00
|
|
|
#include "cache/lru_cache.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "db/dbformat.h"
|
|
|
|
#include "db/memtable.h"
|
|
|
|
#include "db/write_batch_internal.h"
|
2015-10-16 23:10:33 +02:00
|
|
|
#include "memtable/stl_wrappers.h"
|
2017-04-06 04:02:00 +02:00
|
|
|
#include "monitoring/statistics.h"
|
2017-02-06 23:43:55 +01:00
|
|
|
#include "port/port.h"
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "rocksdb/cache.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/iterator.h"
|
|
|
|
#include "rocksdb/memtablerep.h"
|
2015-09-03 00:36:47 +02:00
|
|
|
#include "rocksdb/perf_context.h"
|
2014-03-01 03:19:07 +01:00
|
|
|
#include "rocksdb/slice_transform.h"
|
|
|
|
#include "rocksdb/statistics.h"
|
2016-06-21 03:01:03 +02:00
|
|
|
#include "rocksdb/write_buffer_manager.h"
|
2014-01-28 06:58:46 +01:00
|
|
|
#include "table/block.h"
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "table/block_based_table_builder.h"
|
2013-11-20 07:00:48 +01:00
|
|
|
#include "table/block_based_table_factory.h"
|
2013-11-13 07:46:51 +01:00
|
|
|
#include "table/block_based_table_reader.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/block_builder.h"
|
|
|
|
#include "table/format.h"
|
2015-09-02 22:58:22 +02:00
|
|
|
#include "table/get_context.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/internal_iterator.h"
|
2014-01-28 06:58:46 +01:00
|
|
|
#include "table/meta_blocks.h"
|
|
|
|
#include "table/plain_table_factory.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/scoped_arena_iterator.h"
|
2016-10-19 01:59:37 +02:00
|
|
|
#include "table/sst_file_writer_collectors.h"
|
2015-01-09 22:04:06 +01:00
|
|
|
#include "util/compression.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/random.h"
|
2015-03-20 01:29:37 +01:00
|
|
|
#include "util/string_util.h"
|
2016-08-24 03:20:41 +02:00
|
|
|
#include "util/sync_point.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/testharness.h"
|
|
|
|
#include "util/testutil.h"
|
2016-04-21 19:16:28 +02:00
|
|
|
#include "utilities/merge_operators.h"
|
2015-03-03 02:07:03 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-05-01 20:09:32 +02:00
|
|
|
extern const uint64_t kLegacyBlockBasedTableMagicNumber;
|
|
|
|
extern const uint64_t kLegacyPlainTableMagicNumber;
|
|
|
|
extern const uint64_t kBlockBasedTableMagicNumber;
|
|
|
|
extern const uint64_t kPlainTableMagicNumber;
|
|
|
|
|
2013-11-08 06:27:21 +01:00
|
|
|
namespace {
|
2014-01-24 20:09:04 +01:00
|
|
|
|
2016-04-21 19:16:28 +02:00
|
|
|
// DummyPropertiesCollector used to test BlockBasedTableProperties
|
|
|
|
class DummyPropertiesCollector : public TablePropertiesCollector {
|
|
|
|
public:
|
|
|
|
const char* Name() const { return ""; }
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
Status Finish(UserCollectedProperties* /*properties*/) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2016-04-21 19:16:28 +02:00
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
Status Add(const Slice& /*user_key*/, const Slice& /*value*/) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2016-04-21 19:16:28 +02:00
|
|
|
|
|
|
|
virtual UserCollectedProperties GetReadableProperties() const {
|
|
|
|
return UserCollectedProperties{};
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class DummyPropertiesCollectorFactory1
|
|
|
|
: public TablePropertiesCollectorFactory {
|
|
|
|
public:
|
|
|
|
virtual TablePropertiesCollector* CreateTablePropertiesCollector(
|
2018-03-05 22:08:17 +01:00
|
|
|
TablePropertiesCollectorFactory::Context /*context*/) {
|
2016-04-21 19:16:28 +02:00
|
|
|
return new DummyPropertiesCollector();
|
|
|
|
}
|
|
|
|
const char* Name() const { return "DummyPropertiesCollector1"; }
|
|
|
|
};
|
|
|
|
|
|
|
|
class DummyPropertiesCollectorFactory2
|
|
|
|
: public TablePropertiesCollectorFactory {
|
|
|
|
public:
|
|
|
|
virtual TablePropertiesCollector* CreateTablePropertiesCollector(
|
2018-03-05 22:08:17 +01:00
|
|
|
TablePropertiesCollectorFactory::Context /*context*/) {
|
2016-04-21 19:16:28 +02:00
|
|
|
return new DummyPropertiesCollector();
|
|
|
|
}
|
|
|
|
const char* Name() const { return "DummyPropertiesCollector2"; }
|
|
|
|
};
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Return reverse of "key".
|
|
|
|
// Used to test non-lexicographic comparators.
|
2014-01-24 20:09:04 +01:00
|
|
|
std::string Reverse(const Slice& key) {
|
|
|
|
auto rev = key.ToString();
|
|
|
|
std::reverse(rev.begin(), rev.end());
|
2011-03-18 23:37:00 +01:00
|
|
|
return rev;
|
|
|
|
}
|
|
|
|
|
|
|
|
class ReverseKeyComparator : public Comparator {
|
|
|
|
public:
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual const char* Name() const override {
|
2013-10-05 07:32:05 +02:00
|
|
|
return "rocksdb.ReverseBytewiseComparator";
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual int Compare(const Slice& a, const Slice& b) const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void FindShortestSeparator(std::string* start,
|
|
|
|
const Slice& limit) const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
std::string s = Reverse(*start);
|
|
|
|
std::string l = Reverse(limit);
|
|
|
|
BytewiseComparator()->FindShortestSeparator(&s, l);
|
|
|
|
*start = Reverse(s);
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void FindShortSuccessor(std::string* key) const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
std::string s = Reverse(*key);
|
|
|
|
BytewiseComparator()->FindShortSuccessor(&s);
|
|
|
|
*key = Reverse(s);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
ReverseKeyComparator reverse_key_comparator;
|
|
|
|
|
|
|
|
void Increment(const Comparator* cmp, std::string* key) {
|
2011-03-18 23:37:00 +01:00
|
|
|
if (cmp == BytewiseComparator()) {
|
|
|
|
key->push_back('\0');
|
|
|
|
} else {
|
|
|
|
assert(cmp == &reverse_key_comparator);
|
|
|
|
std::string rev = Reverse(*key);
|
|
|
|
rev.push_back('\0');
|
|
|
|
*key = Reverse(rev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-31 18:22:06 +01:00
|
|
|
} // namespace
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Helper class for tests to unify the interface between
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
 public:
  explicit Constructor(const Comparator* cmp)
      : data_(stl_wrappers::LessOfComparator(cmp)) {}
  virtual ~Constructor() { }

  // Buffer a key/value pair; nothing is built until Finish() runs.
  void Add(const std::string& key, const Slice& value) {
    data_[key] = value.ToString();
  }

  // Finish constructing the data structure with all the keys that have
  // been added so far. Returns the keys in sorted order in "*keys"
  // and stores the key/value pairs in "*kvmap"
  void Finish(const Options& options, const ImmutableCFOptions& ioptions,
              const BlockBasedTableOptions& table_options,
              const InternalKeyComparator& internal_comparator,
              std::vector<std::string>* keys, stl_wrappers::KVMap* kvmap) {
    last_internal_key_ = &internal_comparator;
    *kvmap = data_;
    keys->clear();
    // data_ is ordered by the comparator supplied at construction, so the
    // keys come out already sorted.
    for (const auto& kv : data_) {
      keys->push_back(kv.first);
    }
    // Buffered entries are consumed: subsequent Add() calls start fresh.
    data_.clear();
    Status s = FinishImpl(options, ioptions, table_options,
                          internal_comparator, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }

  // Construct the data structure from the data in "data"
  virtual Status FinishImpl(const Options& options,
                            const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& data) = 0;

  // Iterator over the structure built by FinishImpl(); deletion rules
  // depend on IsArenaMode()/AnywayDeleteIterator() in the subclass.
  virtual InternalIterator* NewIterator() const = 0;

  virtual const stl_wrappers::KVMap& data() { return data_; }

  // True when the subclass allocates its iterators from an arena.
  virtual bool IsArenaMode() const { return false; }

  virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor

  // True when iterators must be deleted even in arena mode.
  virtual bool AnywayDeleteIterator() const { return false; }

 protected:
  // Comparator passed to the most recent Finish() call; available to
  // subclasses (e.g. when reopening a table).
  const InternalKeyComparator* last_internal_key_;

 private:
  stl_wrappers::KVMap data_;
};
|
|
|
|
|
|
|
|
class BlockConstructor: public Constructor {
|
|
|
|
public:
|
|
|
|
explicit BlockConstructor(const Comparator* cmp)
|
|
|
|
: Constructor(cmp),
|
|
|
|
comparator_(cmp),
|
2013-03-01 03:04:58 +01:00
|
|
|
block_(nullptr) { }
|
2011-03-18 23:37:00 +01:00
|
|
|
~BlockConstructor() {
|
|
|
|
delete block_;
|
|
|
|
}
|
2018-03-05 22:08:17 +01:00
|
|
|
virtual Status FinishImpl(
|
|
|
|
const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/,
|
|
|
|
const BlockBasedTableOptions& table_options,
|
|
|
|
const InternalKeyComparator& /*internal_comparator*/,
|
|
|
|
const stl_wrappers::KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
delete block_;
|
2013-03-01 03:04:58 +01:00
|
|
|
block_ = nullptr;
|
2014-09-02 20:49:38 +02:00
|
|
|
BlockBuilder builder(table_options.block_restart_interval);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
|
|
|
builder.Add(kv.first, kv.second);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
// Open the block
|
2012-04-17 17:36:46 +02:00
|
|
|
data_ = builder.Finish().ToString();
|
|
|
|
BlockContents contents;
|
|
|
|
contents.data = data_;
|
|
|
|
contents.cachable = false;
|
2016-10-19 01:59:37 +02:00
|
|
|
block_ = new Block(std::move(contents), kDisableGlobalSequenceNumber);
|
2011-03-18 23:37:00 +01:00
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-10-13 00:06:38 +02:00
|
|
|
virtual InternalIterator* NewIterator() const override {
|
2011-03-18 23:37:00 +01:00
|
|
|
return block_->NewIterator(comparator_);
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const Comparator* comparator_;
|
2012-04-17 17:36:46 +02:00
|
|
|
std::string data_;
|
2011-03-18 23:37:00 +01:00
|
|
|
Block* block_;
|
|
|
|
|
|
|
|
BlockConstructor();
|
|
|
|
};
|
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
// A helper class that converts internal format keys into user keys
class KeyConvertingIterator : public InternalIterator {
 public:
  // When "arena_mode" is true the wrapped iterator was placement-allocated,
  // so the destructor must run its dtor manually instead of deleting it.
  explicit KeyConvertingIterator(InternalIterator* iter,
                                 bool arena_mode = false)
      : iter_(iter), arena_mode_(arena_mode) {}
  virtual ~KeyConvertingIterator() {
    if (arena_mode_) {
      iter_->~InternalIterator();
    } else {
      delete iter_;
    }
  }
  // A corruption recorded by key() invalidates the iterator, per the
  // convention that Valid() is false whenever status() is not ok.
  virtual bool Valid() const override { return iter_->Valid() && status_.ok(); }
  virtual void Seek(const Slice& target) override {
    // Re-encode the user-key target as an internal key before seeking.
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  virtual void SeekForPrev(const Slice& target) override {
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->SeekForPrev(encoded);
  }
  virtual void SeekToFirst() override { iter_->SeekToFirst(); }
  virtual void SeekToLast() override { iter_->SeekToLast(); }
  virtual void Next() override { iter_->Next(); }
  virtual void Prev() override { iter_->Prev(); }

  // Strips the internal-key footer and returns only the user key.
  // On a malformed key, records Corruption (making Valid() false) and
  // returns a placeholder slice.
  virtual Slice key() const override {
    assert(Valid());
    ParsedInternalKey parsed_key;
    if (!ParseInternalKey(iter_->key(), &parsed_key)) {
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    return parsed_key.user_key;
  }

  virtual Slice value() const override { return iter_->value(); }
  // A locally recorded corruption takes precedence over the wrapped
  // iterator's status.
  virtual Status status() const override {
    return status_.ok() ? iter_->status() : status_;
  }

 private:
  // mutable: key() is const but may record a corruption here.
  mutable Status status_;
  InternalIterator* iter_;
  bool arena_mode_;

  // No copying allowed
  KeyConvertingIterator(const KeyConvertingIterator&);
  void operator=(const KeyConvertingIterator&);
};
|
|
|
|
|
|
|
|
class TableConstructor: public Constructor {
|
|
|
|
public:
|
2014-01-28 19:35:48 +01:00
|
|
|
  // "convert_to_internal_key" makes FinishImpl() re-encode the user keys it
  // is given as internal keys before adding them to the table builder.
  // "level" is forwarded to TableBuilderOptions/TableReaderOptions; -1 by
  // default.
  explicit TableConstructor(const Comparator* cmp,
                            bool convert_to_internal_key = false,
                            int level = -1)
      : Constructor(cmp),
        convert_to_internal_key_(convert_to_internal_key),
        level_(level) {}
  ~TableConstructor() { Reset(); }
|
2014-01-24 19:57:15 +01:00
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
virtual Status FinishImpl(const Options& options,
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions& ioptions,
|
2018-03-05 22:08:17 +01:00
|
|
|
const BlockBasedTableOptions& /*table_options*/,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2015-09-02 22:58:22 +02:00
|
|
|
const stl_wrappers::KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
Reset();
|
2015-09-17 01:57:43 +02:00
|
|
|
soptions.use_mmap_reads = ioptions.allow_mmap_reads;
|
2015-08-05 16:33:27 +02:00
|
|
|
file_writer_.reset(test::GetWritableFileWriter(new test::StringSink()));
|
2013-12-20 18:35:24 +01:00
|
|
|
unique_ptr<TableBuilder> builder;
|
A new call back to TablePropertiesCollector to allow users know the entry is add, delete or merge
Summary:
Currently users have no idea a key is add, delete or merge from TablePropertiesCollector call back. Add a new function to add it.
Also refactor the codes so that
(1) make table property collector and internal table property collector two separate data structures with the later one now exposed
(2) table builders only receive internal table properties
Test Plan: Add cases in table_properties_collector_test to cover both of old and new ways of using TablePropertiesCollector.
Reviewers: yhchiang, igor.sugak, rven, igor
Reviewed By: rven, igor
Subscribers: meyering, yoshinorim, maykov, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35373
2015-04-06 19:04:30 +02:00
|
|
|
std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
|
|
|
|
int_tbl_prop_collector_factories;
|
2016-04-07 08:10:32 +02:00
|
|
|
std::string column_family_name;
|
2014-09-05 01:18:36 +02:00
|
|
|
builder.reset(ioptions.table_factory->NewTableBuilder(
|
2018-04-10 01:17:15 +02:00
|
|
|
TableBuilderOptions(
|
|
|
|
ioptions, internal_comparator, &int_tbl_prop_collector_factories,
|
|
|
|
options.compression, CompressionOptions(),
|
|
|
|
nullptr /* compression_dict */, false /* skip_filters */,
|
|
|
|
column_family_name, level_),
|
2015-10-09 01:57:35 +02:00
|
|
|
TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_.get()));
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
2013-12-20 18:35:24 +01:00
|
|
|
if (convert_to_internal_key_) {
|
2014-11-06 20:14:28 +01:00
|
|
|
ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
|
2013-12-20 18:35:24 +01:00
|
|
|
std::string encoded;
|
|
|
|
AppendInternalKey(&encoded, ikey);
|
2014-11-06 20:14:28 +01:00
|
|
|
builder->Add(encoded, kv.second);
|
2013-12-20 18:35:24 +01:00
|
|
|
} else {
|
2014-11-06 20:14:28 +01:00
|
|
|
builder->Add(kv.first, kv.second);
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(builder->status().ok());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
2013-12-20 18:35:24 +01:00
|
|
|
Status s = builder->Finish();
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_->Flush();
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(s.ok()) << s.ToString();
|
2011-03-18 23:37:00 +01:00
|
|
|
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
EXPECT_EQ(GetSink()->contents().size(), builder->FileSize());
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Open the table
|
2013-02-01 00:20:24 +01:00
|
|
|
uniq_id_ = cur_uniq_id_++;
|
2015-08-05 16:33:27 +02:00
|
|
|
file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
|
2018-04-10 01:17:15 +02:00
|
|
|
const bool skip_filters = false;
|
2014-09-05 01:18:36 +02:00
|
|
|
return ioptions.table_factory->NewTableReader(
|
2018-04-10 01:17:15 +02:00
|
|
|
TableReaderOptions(ioptions, soptions, internal_comparator,
|
|
|
|
skip_filters, level_),
|
2015-09-11 20:36:33 +02:00
|
|
|
std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
virtual InternalIterator* NewIterator() const override {
|
2014-02-08 01:25:38 +01:00
|
|
|
ReadOptions ro;
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* iter = table_reader_->NewIterator(ro);
|
2013-12-20 18:35:24 +01:00
|
|
|
if (convert_to_internal_key_) {
|
|
|
|
return new KeyConvertingIterator(iter);
|
|
|
|
} else {
|
|
|
|
return iter;
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t ApproximateOffsetOf(const Slice& key) const {
|
2016-08-20 00:10:31 +02:00
|
|
|
if (convert_to_internal_key_) {
|
|
|
|
InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
|
|
|
|
const Slice skey = ikey.Encode();
|
|
|
|
return table_reader_->ApproximateOffsetOf(skey);
|
|
|
|
}
|
2013-10-30 18:52:33 +01:00
|
|
|
return table_reader_->ApproximateOffsetOf(key);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
virtual Status Reopen(const ImmutableCFOptions& ioptions) {
|
2015-08-05 16:33:27 +02:00
|
|
|
file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
|
2014-09-05 01:18:36 +02:00
|
|
|
return ioptions.table_factory->NewTableReader(
|
2015-09-11 20:36:33 +02:00
|
|
|
TableReaderOptions(ioptions, soptions, *last_internal_key_),
|
|
|
|
std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
|
|
|
|
2014-08-26 01:14:30 +02:00
|
|
|
virtual TableReader* GetTableReader() {
|
2013-10-30 18:52:33 +01:00
|
|
|
return table_reader_.get();
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool AnywayDeleteIterator() const override {
|
|
|
|
return convert_to_internal_key_;
|
|
|
|
}
|
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
void ResetTableReader() { table_reader_.reset(); }
|
|
|
|
|
2016-08-20 00:10:31 +02:00
|
|
|
bool ConvertToInternalKey() { return convert_to_internal_key_; }
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
|
|
|
void Reset() {
|
2013-02-01 00:20:24 +01:00
|
|
|
uniq_id_ = 0;
|
2013-10-30 18:52:33 +01:00
|
|
|
table_reader_.reset();
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
file_writer_.reset();
|
|
|
|
file_reader_.reset();
|
|
|
|
}
|
|
|
|
|
2015-08-05 16:33:27 +02:00
|
|
|
test::StringSink* GetSink() {
|
|
|
|
return static_cast<test::StringSink*>(file_writer_->writable_file());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2013-02-01 00:20:24 +01:00
|
|
|
uint64_t uniq_id_;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
unique_ptr<WritableFileWriter> file_writer_;
|
|
|
|
unique_ptr<RandomAccessFileReader> file_reader_;
|
2013-10-30 18:52:33 +01:00
|
|
|
unique_ptr<TableReader> table_reader_;
|
2014-09-05 02:40:41 +02:00
|
|
|
bool convert_to_internal_key_;
|
2018-04-10 01:17:15 +02:00
|
|
|
int level_;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
TableConstructor();
|
2013-02-01 00:20:24 +01:00
|
|
|
|
|
|
|
static uint64_t cur_uniq_id_;
|
2015-09-17 01:57:43 +02:00
|
|
|
EnvOptions soptions;
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
2013-12-20 18:35:24 +01:00
|
|
|
uint64_t TableConstructor::cur_uniq_id_ = 1;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
class MemTableConstructor: public Constructor {
|
|
|
|
public:
|
2016-06-21 03:01:03 +02:00
|
|
|
explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
|
2011-03-18 23:37:00 +01:00
|
|
|
: Constructor(cmp),
|
2013-07-23 23:42:27 +02:00
|
|
|
internal_comparator_(cmp),
|
2016-06-21 03:01:03 +02:00
|
|
|
write_buffer_manager_(wb),
|
2013-07-23 23:42:27 +02:00
|
|
|
table_factory_(new SkipListFactory) {
|
2014-12-02 21:09:20 +01:00
|
|
|
options_.memtable_factory = table_factory_;
|
|
|
|
ImmutableCFOptions ioptions(options_);
|
2016-09-14 06:11:59 +02:00
|
|
|
memtable_ =
|
|
|
|
new MemTable(internal_comparator_, ioptions, MutableCFOptions(options_),
|
2017-06-02 21:08:01 +02:00
|
|
|
wb, kMaxSequenceNumber, 0 /* column_family_id */);
|
2011-05-21 04:17:43 +02:00
|
|
|
memtable_->Ref();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
~MemTableConstructor() {
|
2013-12-02 06:23:44 +01:00
|
|
|
delete memtable_->Unref();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
2018-03-05 22:08:17 +01:00
|
|
|
virtual Status FinishImpl(
|
|
|
|
const Options&, const ImmutableCFOptions& ioptions,
|
|
|
|
const BlockBasedTableOptions& /*table_options*/,
|
|
|
|
const InternalKeyComparator& /*internal_comparator*/,
|
|
|
|
const stl_wrappers::KVMap& kv_map) override {
|
2013-12-02 06:23:44 +01:00
|
|
|
delete memtable_->Unref();
|
2014-12-02 21:09:20 +01:00
|
|
|
ImmutableCFOptions mem_ioptions(ioptions);
|
2014-10-02 01:19:16 +02:00
|
|
|
memtable_ = new MemTable(internal_comparator_, mem_ioptions,
|
2016-09-14 06:11:59 +02:00
|
|
|
MutableCFOptions(options_), write_buffer_manager_,
|
2017-06-02 21:08:01 +02:00
|
|
|
kMaxSequenceNumber, 0 /* column_family_id */);
|
2011-05-21 04:17:43 +02:00
|
|
|
memtable_->Ref();
|
2011-03-18 23:37:00 +01:00
|
|
|
int seq = 1;
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
|
|
|
memtable_->Add(seq, kTypeValue, kv.first, kv.second);
|
2011-03-18 23:37:00 +01:00
|
|
|
seq++;
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-10-13 00:06:38 +02:00
|
|
|
virtual InternalIterator* NewIterator() const override {
|
2014-09-05 02:40:41 +02:00
|
|
|
return new KeyConvertingIterator(
|
|
|
|
memtable_->NewIterator(ReadOptions(), &arena_), true);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-09-05 02:40:41 +02:00
|
|
|
virtual bool AnywayDeleteIterator() const override { return true; }
|
|
|
|
|
|
|
|
virtual bool IsArenaMode() const override { return true; }
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
2014-09-05 02:40:41 +02:00
|
|
|
mutable Arena arena_;
|
2011-03-18 23:37:00 +01:00
|
|
|
InternalKeyComparator internal_comparator_;
|
2014-12-02 21:09:20 +01:00
|
|
|
Options options_;
|
2016-06-21 03:01:03 +02:00
|
|
|
WriteBufferManager* write_buffer_manager_;
|
2011-03-18 23:37:00 +01:00
|
|
|
MemTable* memtable_;
|
2013-07-23 23:42:27 +02:00
|
|
|
std::shared_ptr<SkipListFactory> table_factory_;
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
class InternalIteratorFromIterator : public InternalIterator {
|
|
|
|
public:
|
|
|
|
explicit InternalIteratorFromIterator(Iterator* it) : it_(it) {}
|
|
|
|
virtual bool Valid() const override { return it_->Valid(); }
|
|
|
|
virtual void Seek(const Slice& target) override { it_->Seek(target); }
|
2016-09-28 03:20:57 +02:00
|
|
|
virtual void SeekForPrev(const Slice& target) override {
|
|
|
|
it_->SeekForPrev(target);
|
|
|
|
}
|
2015-10-13 00:06:38 +02:00
|
|
|
virtual void SeekToFirst() override { it_->SeekToFirst(); }
|
|
|
|
virtual void SeekToLast() override { it_->SeekToLast(); }
|
|
|
|
virtual void Next() override { it_->Next(); }
|
|
|
|
virtual void Prev() override { it_->Prev(); }
|
|
|
|
Slice key() const override { return it_->key(); }
|
|
|
|
Slice value() const override { return it_->value(); }
|
|
|
|
virtual Status status() const override { return it_->status(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
unique_ptr<Iterator> it_;
|
|
|
|
};
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
class DBConstructor: public Constructor {
|
|
|
|
public:
|
|
|
|
explicit DBConstructor(const Comparator* cmp)
|
|
|
|
: Constructor(cmp),
|
|
|
|
comparator_(cmp) {
|
2013-03-01 03:04:58 +01:00
|
|
|
db_ = nullptr;
|
2011-03-18 23:37:00 +01:00
|
|
|
NewDB();
|
|
|
|
}
|
|
|
|
~DBConstructor() {
|
|
|
|
delete db_;
|
|
|
|
}
|
2018-03-05 22:08:17 +01:00
|
|
|
virtual Status FinishImpl(
|
|
|
|
const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/,
|
|
|
|
const BlockBasedTableOptions& /*table_options*/,
|
|
|
|
const InternalKeyComparator& /*internal_comparator*/,
|
|
|
|
const stl_wrappers::KVMap& kv_map) override {
|
2011-03-18 23:37:00 +01:00
|
|
|
delete db_;
|
2013-03-01 03:04:58 +01:00
|
|
|
db_ = nullptr;
|
2011-03-18 23:37:00 +01:00
|
|
|
NewDB();
|
2014-11-06 20:14:28 +01:00
|
|
|
for (const auto kv : kv_map) {
|
2011-03-18 23:37:00 +01:00
|
|
|
WriteBatch batch;
|
2014-11-06 20:14:28 +01:00
|
|
|
batch.Put(kv.first, kv.second);
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-10-13 00:06:38 +02:00
|
|
|
|
|
|
|
virtual InternalIterator* NewIterator() const override {
|
|
|
|
return new InternalIteratorFromIterator(db_->NewIterator(ReadOptions()));
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual DB* db() const override { return db_; }
|
2011-03-21 20:40:57 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
|
|
|
void NewDB() {
|
|
|
|
std::string name = test::TmpDir() + "/table_testdb";
|
|
|
|
|
2013-11-20 07:00:48 +01:00
|
|
|
Options options;
|
2011-03-18 23:37:00 +01:00
|
|
|
options.comparator = comparator_;
|
|
|
|
Status status = DestroyDB(name, options);
|
|
|
|
ASSERT_TRUE(status.ok()) << status.ToString();
|
|
|
|
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.error_if_exists = true;
|
2011-03-21 20:40:57 +01:00
|
|
|
options.write_buffer_size = 10000; // Something small to force merging
|
2011-03-18 23:37:00 +01:00
|
|
|
status = DB::Open(options, name, &db_);
|
|
|
|
ASSERT_TRUE(status.ok()) << status.ToString();
|
|
|
|
}
|
|
|
|
|
|
|
|
const Comparator* comparator_;
|
|
|
|
DB* db_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// The kinds of key/value containers the harness can build and scan.
enum TestType {
  BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
  PLAIN_TABLE_SEMI_FIXED_PREFIX,  // plain table + fixed-or-shorter prefix
  PLAIN_TABLE_FULL_STR_PREFIX,    // plain table + whole-key (noop) prefix
  PLAIN_TABLE_TOTAL_ORDER,        // plain table, no prefix extractor
#endif  // !ROCKSDB_LITE
  BLOCK_TEST,
  MEMTABLE_TEST,
  DB_TEST
};
|
|
|
|
|
|
|
|
struct TestArgs {
|
|
|
|
TestType type;
|
|
|
|
bool reverse_compare;
|
|
|
|
int restart_interval;
|
2012-06-28 08:41:33 +02:00
|
|
|
CompressionType compression;
|
2015-01-15 01:24:24 +01:00
|
|
|
uint32_t format_version;
|
2015-09-17 01:57:43 +02:00
|
|
|
bool use_mmap;
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
|
|
|
|
2013-11-20 07:00:48 +01:00
|
|
|
static std::vector<TestArgs> GenerateArgList() {
|
2014-01-24 20:09:04 +01:00
|
|
|
std::vector<TestArgs> test_args;
|
|
|
|
std::vector<TestType> test_types = {
|
2015-07-20 20:09:14 +02:00
|
|
|
BLOCK_BASED_TABLE_TEST,
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
PLAIN_TABLE_SEMI_FIXED_PREFIX,
|
|
|
|
PLAIN_TABLE_FULL_STR_PREFIX,
|
|
|
|
PLAIN_TABLE_TOTAL_ORDER,
|
|
|
|
#endif // !ROCKSDB_LITE
|
|
|
|
BLOCK_TEST,
|
|
|
|
MEMTABLE_TEST, DB_TEST};
|
2014-01-24 20:09:04 +01:00
|
|
|
std::vector<bool> reverse_compare_types = {false, true};
|
|
|
|
std::vector<int> restart_intervals = {16, 1, 1024};
|
2012-06-28 08:41:33 +02:00
|
|
|
|
|
|
|
// Only add compression if it is supported
|
2015-01-15 01:24:24 +01:00
|
|
|
std::vector<std::pair<CompressionType, bool>> compression_types;
|
|
|
|
compression_types.emplace_back(kNoCompression, false);
|
2015-04-06 21:50:44 +02:00
|
|
|
if (Snappy_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kSnappyCompression, false);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (Zlib_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kZlibCompression, false);
|
|
|
|
compression_types.emplace_back(kZlibCompression, true);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (BZip2_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kBZip2Compression, false);
|
|
|
|
compression_types.emplace_back(kBZip2Compression, true);
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2015-04-06 21:50:44 +02:00
|
|
|
if (LZ4_Supported()) {
|
2015-01-15 01:24:24 +01:00
|
|
|
compression_types.emplace_back(kLZ4Compression, false);
|
|
|
|
compression_types.emplace_back(kLZ4Compression, true);
|
|
|
|
compression_types.emplace_back(kLZ4HCCompression, false);
|
|
|
|
compression_types.emplace_back(kLZ4HCCompression, true);
|
2014-02-08 03:12:30 +01:00
|
|
|
}
|
2016-04-20 07:54:24 +02:00
|
|
|
if (XPRESS_Supported()) {
|
|
|
|
compression_types.emplace_back(kXpressCompression, false);
|
|
|
|
compression_types.emplace_back(kXpressCompression, true);
|
|
|
|
}
|
2015-08-28 00:40:42 +02:00
|
|
|
if (ZSTD_Supported()) {
|
2016-09-02 00:28:40 +02:00
|
|
|
compression_types.emplace_back(kZSTD, false);
|
|
|
|
compression_types.emplace_back(kZSTD, true);
|
2015-08-28 00:40:42 +02:00
|
|
|
}
|
2012-06-29 04:26:43 +02:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
for (auto test_type : test_types) {
|
|
|
|
for (auto reverse_compare : reverse_compare_types) {
|
2015-07-20 20:09:14 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2014-01-24 20:09:04 +01:00
|
|
|
if (test_type == PLAIN_TABLE_SEMI_FIXED_PREFIX ||
|
2015-09-17 01:57:43 +02:00
|
|
|
test_type == PLAIN_TABLE_FULL_STR_PREFIX ||
|
|
|
|
test_type == PLAIN_TABLE_TOTAL_ORDER) {
|
2013-12-20 18:35:24 +01:00
|
|
|
// Plain table doesn't use restart index or compression.
|
|
|
|
TestArgs one_arg;
|
2014-01-24 20:09:04 +01:00
|
|
|
one_arg.type = test_type;
|
|
|
|
one_arg.reverse_compare = reverse_compare;
|
|
|
|
one_arg.restart_interval = restart_intervals[0];
|
2015-01-15 01:24:24 +01:00
|
|
|
one_arg.compression = compression_types[0].first;
|
2015-09-17 01:57:43 +02:00
|
|
|
one_arg.use_mmap = true;
|
|
|
|
test_args.push_back(one_arg);
|
|
|
|
one_arg.use_mmap = false;
|
2014-01-24 20:09:04 +01:00
|
|
|
test_args.push_back(one_arg);
|
2013-12-20 18:35:24 +01:00
|
|
|
continue;
|
|
|
|
}
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2012-06-28 08:41:33 +02:00
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
for (auto restart_interval : restart_intervals) {
|
|
|
|
for (auto compression_type : compression_types) {
|
2013-12-20 18:35:24 +01:00
|
|
|
TestArgs one_arg;
|
2014-01-24 20:09:04 +01:00
|
|
|
one_arg.type = test_type;
|
|
|
|
one_arg.reverse_compare = reverse_compare;
|
|
|
|
one_arg.restart_interval = restart_interval;
|
2015-01-15 01:24:24 +01:00
|
|
|
one_arg.compression = compression_type.first;
|
|
|
|
one_arg.format_version = compression_type.second ? 2 : 1;
|
2015-09-17 01:57:43 +02:00
|
|
|
one_arg.use_mmap = false;
|
2014-01-24 20:09:04 +01:00
|
|
|
test_args.push_back(one_arg);
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
2014-01-24 20:09:04 +01:00
|
|
|
}
|
|
|
|
return test_args;
|
2012-06-28 08:41:33 +02:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
// In order to make all tests run for plain table format, including
|
|
|
|
// those operating on empty keys, create a new prefix transformer which
|
|
|
|
// return fixed prefix if the slice is not shorter than the prefix length,
|
|
|
|
// and the full slice if it is shorter.
|
|
|
|
class FixedOrLessPrefixTransform : public SliceTransform {
|
|
|
|
private:
|
|
|
|
const size_t prefix_len_;
|
|
|
|
|
|
|
|
public:
|
|
|
|
explicit FixedOrLessPrefixTransform(size_t prefix_len) :
|
|
|
|
prefix_len_(prefix_len) {
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual const char* Name() const override { return "rocksdb.FixedPrefix"; }
|
2013-12-20 18:35:24 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Slice Transform(const Slice& src) const override {
|
2013-12-20 18:35:24 +01:00
|
|
|
assert(InDomain(src));
|
|
|
|
if (src.size() < prefix_len_) {
|
|
|
|
return src;
|
|
|
|
}
|
|
|
|
return Slice(src.data(), prefix_len_);
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
virtual bool InDomain(const Slice& /*src*/) const override { return true; }
|
2013-12-20 18:35:24 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual bool InRange(const Slice& dst) const override {
|
2013-12-20 18:35:24 +01:00
|
|
|
return (dst.size() <= prefix_len_);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
class HarnessTest : public testing::Test {
|
2011-03-18 23:37:00 +01:00
|
|
|
public:
|
2015-03-17 02:08:59 +01:00
|
|
|
HarnessTest()
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
: ioptions_(options_),
|
|
|
|
constructor_(nullptr),
|
|
|
|
write_buffer_(options_.db_write_buffer_size) {}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
void Init(const TestArgs& args) {
|
|
|
|
delete constructor_;
|
2013-03-01 03:04:58 +01:00
|
|
|
constructor_ = nullptr;
|
2013-11-20 07:00:48 +01:00
|
|
|
options_ = Options();
|
2012-06-28 08:41:33 +02:00
|
|
|
options_.compression = args.compression;
|
2011-03-18 23:37:00 +01:00
|
|
|
// Use shorter block size for tests to exercise block boundary
|
|
|
|
// conditions more.
|
|
|
|
if (args.reverse_compare) {
|
|
|
|
options_.comparator = &reverse_key_comparator;
|
|
|
|
}
|
2014-01-27 22:53:22 +01:00
|
|
|
|
|
|
|
internal_comparator_.reset(
|
|
|
|
new test::PlainInternalKeyComparator(options_.comparator));
|
|
|
|
|
2013-12-20 18:35:24 +01:00
|
|
|
support_prev_ = true;
|
|
|
|
only_support_prefix_seek_ = false;
|
2015-09-17 01:57:43 +02:00
|
|
|
options_.allow_mmap_reads = args.use_mmap;
|
2011-03-18 23:37:00 +01:00
|
|
|
switch (args.type) {
|
2013-12-20 18:35:24 +01:00
|
|
|
case BLOCK_BASED_TABLE_TEST:
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options_.flush_block_policy_factory.reset(
|
2014-03-01 01:39:27 +01:00
|
|
|
new FlushBlockBySizePolicyFactory());
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
table_options_.block_restart_interval = args.restart_interval;
|
2016-02-05 19:22:37 +01:00
|
|
|
table_options_.index_block_restart_interval = args.restart_interval;
|
2015-01-15 01:24:24 +01:00
|
|
|
table_options_.format_version = args.format_version;
|
2014-08-25 23:22:05 +02:00
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2016-08-20 00:10:31 +02:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2013-12-20 18:35:24 +01:00
|
|
|
break;
|
2015-07-20 20:09:14 +02:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2013-12-20 18:35:24 +01:00
|
|
|
case PLAIN_TABLE_SEMI_FIXED_PREFIX:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = true;
|
2014-03-10 20:56:46 +01:00
|
|
|
options_.prefix_extractor.reset(new FixedOrLessPrefixTransform(2));
|
2014-02-08 01:25:38 +01:00
|
|
|
options_.table_factory.reset(NewPlainTableFactory());
|
2016-08-20 00:10:31 +02:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-01-27 22:53:22 +01:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2013-12-20 18:35:24 +01:00
|
|
|
break;
|
|
|
|
case PLAIN_TABLE_FULL_STR_PREFIX:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = true;
|
2014-03-10 20:56:46 +01:00
|
|
|
options_.prefix_extractor.reset(NewNoopTransform());
|
2014-02-08 01:25:38 +01:00
|
|
|
options_.table_factory.reset(NewPlainTableFactory());
|
2016-08-20 00:10:31 +02:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-02-08 01:25:38 +01:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
|
|
|
break;
|
|
|
|
case PLAIN_TABLE_TOTAL_ORDER:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = false;
|
|
|
|
options_.prefix_extractor = nullptr;
|
2014-07-18 09:08:38 +02:00
|
|
|
|
|
|
|
{
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = kPlainTableVariableLength;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
|
|
|
|
options_.table_factory.reset(
|
|
|
|
NewPlainTableFactory(plain_table_options));
|
|
|
|
}
|
2016-08-20 00:10:31 +02:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-01-27 22:53:22 +01:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2011-03-18 23:37:00 +01:00
|
|
|
break;
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2011-03-18 23:37:00 +01:00
|
|
|
case BLOCK_TEST:
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2011-03-18 23:37:00 +01:00
|
|
|
constructor_ = new BlockConstructor(options_.comparator);
|
|
|
|
break;
|
|
|
|
case MEMTABLE_TEST:
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2014-12-02 21:09:20 +01:00
|
|
|
constructor_ = new MemTableConstructor(options_.comparator,
|
|
|
|
&write_buffer_);
|
2011-03-18 23:37:00 +01:00
|
|
|
break;
|
|
|
|
case DB_TEST:
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2011-03-18 23:37:00 +01:00
|
|
|
constructor_ = new DBConstructor(options_.comparator);
|
|
|
|
break;
|
|
|
|
}
|
2014-09-05 01:18:36 +02:00
|
|
|
ioptions_ = ImmutableCFOptions(options_);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
~HarnessTest() { delete constructor_; }
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
void Add(const std::string& key, const std::string& value) {
|
|
|
|
constructor_->Add(key, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Test(Random* rnd) {
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap data;
|
2014-09-05 01:18:36 +02:00
|
|
|
constructor_->Finish(options_, ioptions_, table_options_,
|
|
|
|
*internal_comparator_, &keys, &data);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
TestForwardScan(keys, data);
|
2013-12-20 18:35:24 +01:00
|
|
|
if (support_prev_) {
|
|
|
|
TestBackwardScan(keys, data);
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
TestRandomAccess(rnd, keys, data);
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
void TestForwardScan(const std::vector<std::string>& /*keys*/,
|
2015-09-02 22:58:22 +02:00
|
|
|
const stl_wrappers::KVMap& data) {
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* iter = constructor_->NewIterator();
|
2011-03-18 23:37:00 +01:00
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
iter->SeekToFirst();
|
2015-09-02 22:58:22 +02:00
|
|
|
for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
|
|
|
|
model_iter != data.end(); ++model_iter) {
|
2011-03-18 23:37:00 +01:00
|
|
|
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
|
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-09-05 02:40:41 +02:00
|
|
|
if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
|
2015-10-13 00:06:38 +02:00
|
|
|
iter->~InternalIterator();
|
2014-09-05 02:40:41 +02:00
|
|
|
} else {
|
|
|
|
delete iter;
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
void TestBackwardScan(const std::vector<std::string>& /*keys*/,
|
2015-09-02 22:58:22 +02:00
|
|
|
const stl_wrappers::KVMap& data) {
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* iter = constructor_->NewIterator();
|
2011-03-18 23:37:00 +01:00
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
iter->SeekToLast();
|
2015-09-02 22:58:22 +02:00
|
|
|
for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
|
|
|
|
model_iter != data.rend(); ++model_iter) {
|
2011-03-18 23:37:00 +01:00
|
|
|
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
|
|
|
|
iter->Prev();
|
|
|
|
}
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-09-05 02:40:41 +02:00
|
|
|
if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
|
2015-10-13 00:06:38 +02:00
|
|
|
iter->~InternalIterator();
|
2014-09-05 02:40:41 +02:00
|
|
|
} else {
|
|
|
|
delete iter;
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-09-02 22:58:22 +02:00
|
|
|
  // Performs 200 random iterator operations (Next/Prev/Seek/SeekToFirst/
  // SeekToLast) on the table iterator while mirroring every operation on a
  // model iterator over `data`, asserting after each step that both agree.
  // Prev/SeekToLast cases are only exercised when support_prev_ is true.
  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                        const stl_wrappers::KVMap& data) {
    static const bool kVerbose = false;
    InternalIterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    stl_wrappers::KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      // Draw from 5 operations when Prev is supported, otherwise only the
      // forward-moving first 3.
      const int toss = rnd->Uniform(support_prev_ ? 5 : 3);
      switch (toss) {
        case 0: {
          // Advance one step (only meaningful from a valid position).
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 2: {
          // Seek to a randomly chosen key; the model's analogue of Seek is
          // lower_bound (first entry >= key).
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
                                EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 3: {
          // Step backwards (only meaningful from a valid position).
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();  // Wrap around to invalid value
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 4: {
          if (kVerbose) fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            // lower_bound on the largest key lands exactly on the last entry.
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    // Arena-allocated iterators are destroyed in place rather than deleted.
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~InternalIterator();
    } else {
      delete iter;
    }
  }
|
|
|
|
|
2015-09-02 22:58:22 +02:00
|
|
|
std::string ToString(const stl_wrappers::KVMap& data,
|
|
|
|
const stl_wrappers::KVMap::const_iterator& it) {
|
2011-03-18 23:37:00 +01:00
|
|
|
if (it == data.end()) {
|
|
|
|
return "END";
|
|
|
|
} else {
|
|
|
|
return "'" + it->first + "->" + it->second + "'";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-02 22:58:22 +02:00
|
|
|
std::string ToString(const stl_wrappers::KVMap& data,
|
|
|
|
const stl_wrappers::KVMap::const_reverse_iterator& it) {
|
2011-03-18 23:37:00 +01:00
|
|
|
if (it == data.rend()) {
|
|
|
|
return "END";
|
|
|
|
} else {
|
|
|
|
return "'" + it->first + "->" + it->second + "'";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
std::string ToString(const InternalIterator* it) {
|
2011-03-18 23:37:00 +01:00
|
|
|
if (!it->Valid()) {
|
|
|
|
return "END";
|
|
|
|
} else {
|
|
|
|
return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
  // Picks a random probe key for Seek() tests: either an existing key, one
  // nudged just below an existing key, or one just above. The smaller/larger
  // variants are only produced when support_prev_ is true.
  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";  // arbitrary probe when the table is empty
    } else {
      const int index = rnd->Uniform(static_cast<int>(keys.size()));
      std::string result = keys[index];
      switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
        case 0:
          // Return an existing key
          break;
        case 1: {
          // Attempt to return something smaller than an existing key
          // Only decrement the last byte if it won't underflow and, for
          // prefix-seek-only tables, if doing so cannot change the key's
          // prefix (i.e. the last byte lies beyond the extracted prefix).
          if (result.size() > 0 && result[result.size() - 1] > '\0'
              && (!only_support_prefix_seek_
                  || options_.prefix_extractor->Transform(result).size()
                  < result.size())) {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }
|
|
|
|
|
2013-03-01 03:04:58 +01:00
|
|
|
  // Returns nullptr if not running against a DB (i.e. when the active
  // constructor does not build a full database instance).
  DB* db() const { return constructor_->db(); }
|
|
|
|
|
2018-01-29 16:36:05 +01:00
|
|
|
void RandomizedHarnessTest(size_t part, size_t total) {
|
|
|
|
std::vector<TestArgs> args = GenerateArgList();
|
|
|
|
assert(part);
|
|
|
|
assert(part <= total);
|
2018-05-05 00:14:54 +02:00
|
|
|
for (unsigned int i = 0; i < args.size(); i++) {
|
|
|
|
if ((i % total) + 1 != part) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-01-29 16:36:05 +01:00
|
|
|
Init(args[i]);
|
|
|
|
Random rnd(test::RandomSeed() + 5);
|
|
|
|
for (int num_entries = 0; num_entries < 2000;
|
|
|
|
num_entries += (num_entries < 50 ? 1 : 200)) {
|
|
|
|
for (int e = 0; e < num_entries; e++) {
|
|
|
|
std::string v;
|
|
|
|
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
|
|
|
|
test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
|
|
|
|
}
|
|
|
|
Test(&rnd);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
 private:
  // Options for the table under test; owns the comparator/prefix extractor
  // that the harness helpers reference.
  Options options_ = Options();
  ImmutableCFOptions ioptions_;
  BlockBasedTableOptions table_options_ = BlockBasedTableOptions();
  // Builds tables/iterators for the current configuration.
  Constructor* constructor_;
  WriteBufferManager write_buffer_;
  // Whether the format supports backward iteration; gates Prev()/SeekToLast()
  // cases in TestRandomAccess and the smaller/larger variants in
  // PickRandomKey.
  bool support_prev_;
  // When true, Seek() is only valid within a prefix, so PickRandomKey avoids
  // mutating the prefix portion of a key.
  bool only_support_prefix_seek_;
  shared_ptr<InternalKeyComparator> internal_comparator_;
};
|
|
|
|
|
|
|
|
// Returns true iff `val` lies in the inclusive range [low, high]; on failure
// it logs the offending value and bounds to stderr before returning false.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  const bool in_range = (low <= val) && (val <= high);
  if (!in_range) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            static_cast<unsigned long long>(val),
            static_cast<unsigned long long>(low),
            static_cast<unsigned long long>(high));
  }
  return in_range;
}
|
|
|
|
|
2014-01-24 21:14:08 +01:00
|
|
|
// Tests against all kinds of tables
|
2015-03-17 22:08:00 +01:00
|
|
|
class TableTest : public testing::Test {
 public:
  // Lazily builds (and caches for the fixture's lifetime) an internal-key
  // comparator wrapping the given user comparator.
  const InternalKeyComparator& GetPlainInternalComparator(
      const Comparator* comp) {
    if (!plain_internal_comparator) {
      plain_internal_comparator.reset(
          new test::PlainInternalKeyComparator(comp));
    }
    return *plain_internal_comparator;
  }
  // Shared body for the index-type tests; defined elsewhere in this file.
  void IndexTest(BlockBasedTableOptions table_options);

 private:
  std::unique_ptr<InternalKeyComparator> plain_internal_comparator;
};
|
|
|
|
|
|
|
|
// Fixture for tests that apply to every table format.
class GeneralTableTest : public TableTest {};
|
2018-01-11 00:06:29 +01:00
|
|
|
class BlockBasedTableTest : public TableTest {
 protected:
  // Builds a table with index compression toggled by `indexCompress` and
  // returns the NUMBER_BLOCK_COMPRESSED ticker count, so tests can compare
  // the two configurations.
  uint64_t IndexUncompressedHelper(bool indexCompress);
};
|
2014-01-27 22:53:22 +01:00
|
|
|
// Fixture for PlainTable-specific tests.
class PlainTableTest : public TableTest {};
|
2015-03-17 22:08:00 +01:00
|
|
|
// Fixture for tests of user-collected table properties.
class TablePropertyTest : public testing::Test {};
|
2014-02-12 22:14:59 +01:00
|
|
|
|
|
|
|
// This test serves as the living tutorial for the prefix scan of user collected
|
|
|
|
// properties.
|
2015-03-17 22:08:00 +01:00
|
|
|
// Demonstrates how to iterate all properties sharing a prefix by combining
// lower_bound with a prefix comparison on each subsequent key.
TEST_F(TablePropertyTest, PrefixScanTest) {
  UserCollectedProperties props{{"num.111.1", "1"},
                                {"num.111.2", "2"},
                                {"num.111.3", "3"},
                                {"num.333.1", "1"},
                                {"num.333.2", "2"},
                                {"num.333.3", "3"},
                                {"num.555.1", "1"},
                                {"num.555.2", "2"},
                                {"num.555.3", "3"}, };

  // prefixes that exist
  for (const std::string& prefix : {"num.111", "num.333", "num.555"}) {
    int match_count = 0;
    auto it = props.lower_bound(prefix);
    while (it != props.end() &&
           it->first.compare(0, prefix.size(), prefix) == 0) {
      ++match_count;
      ASSERT_EQ(prefix + "." + ToString(match_count), it->first);
      ASSERT_EQ(ToString(match_count), it->second);
      ++it;
    }
    ASSERT_EQ(3, match_count);
  }

  // prefixes that don't exist
  for (const std::string& prefix :
       {"num.000", "num.222", "num.444", "num.666"}) {
    auto it = props.lower_bound(prefix);
    ASSERT_TRUE(it == props.end() ||
                it->first.compare(0, prefix.size(), prefix) != 0);
  }
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// This test include all the basic checks except those for index size and block
|
|
|
|
// size, which will be conducted in separated unit tests.
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) {
  // Build a small, uncompressed single-block table and verify the table
  // properties (entry count, raw sizes, data size) computed at build time.
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);

  c.Add("a1", "val1");
  c.Add("b2", "val2");
  c.Add("c3", "val3");
  c.Add("d4", "val4");
  c.Add("e5", "val5");
  c.Add("f6", "val6");
  c.Add("g7", "val7");
  c.Add("h8", "val8");
  c.Add("j9", "val9");
  // Internal keys carry an 8-byte sequence/type footer per entry; account
  // for it when comparing against user-key sizes.
  uint64_t diff_internal_user_bytes = 9 * 8;  // 8 is seq size, 9 k-v totally

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.compression = kNoCompression;
  options.statistics = CreateDBStatistics();
  options.statistics->stats_level_ = StatsLevel::kAll;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  ImmutableCFOptions ioptions(options);
  ioptions.statistics = options.statistics.get();
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  // With kNoCompression nothing should be counted as "not compressed"
  // (that ticker only fires when compression was attempted).
  ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_NOT_COMPRESSED), 0);

  auto& props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ(kvmap.size(), props.num_entries);

  // Every user key is 2 bytes and every value 4 bytes in the fixture above.
  auto raw_key_size = kvmap.size() * 2ul;
  auto raw_value_size = kvmap.size() * 4ul;

  ASSERT_EQ(raw_key_size + diff_internal_user_bytes, props.raw_key_size);
  ASSERT_EQ(raw_value_size, props.raw_value_size);
  ASSERT_EQ(1ul, props.num_data_blocks);
  ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used

  // Verify data size.
  // Rebuild the same single data block independently and compare sizes,
  // adding the per-block trailer and the internal-key overhead.
  BlockBuilder block_builder(1);
  for (const auto& item : kvmap) {
    block_builder.Add(item.first, item.second);
  }
  Slice content = block_builder.Finish();
  ASSERT_EQ(content.size() + kBlockTrailerSize + diff_internal_user_bytes,
            props.data_size);
  c.ResetTableReader();
}
|
|
|
|
|
2018-01-17 02:26:29 +01:00
|
|
|
#ifdef SNAPPY
|
2018-01-11 00:06:29 +01:00
|
|
|
// Builds a 10k-key Snappy-compressed table with index compression toggled by
// `compressed`, and returns how many blocks were compressed according to the
// NUMBER_BLOCK_COMPRESSED statistics ticker.
uint64_t BlockBasedTableTest::IndexUncompressedHelper(bool compressed) {
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  constexpr size_t kNumKeys = 10000;

  for (size_t k = 0; k < kNumKeys; ++k) {
    c.Add("key" + ToString(k), "val" + ToString(k));
  }

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.compression = kSnappyCompression;
  // kAll stats level is needed for the block-compression tickers.
  options.statistics = CreateDBStatistics();
  options.statistics->stats_level_ = StatsLevel::kAll;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  table_options.enable_index_compression = compressed;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  ImmutableCFOptions ioptions(options);
  ioptions.statistics = options.statistics.get();
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  c.ResetTableReader();
  return options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED);
}
|
|
|
|
// Builds two identical tables, one with index compression enabled and one
// without, and checks that enabling it compresses exactly one extra block
// (the index block).
TEST_F(BlockBasedTableTest, IndexUncompressed) {
  const uint64_t with_index_compression = IndexUncompressedHelper(true);
  const uint64_t without_index_compression = IndexUncompressedHelper(false);
  EXPECT_EQ(without_index_compression + 1, with_index_compression);
}
|
2018-01-17 02:26:29 +01:00
|
|
|
#endif // SNAPPY
|
2018-01-11 00:06:29 +01:00
|
|
|
|
2016-04-21 19:16:28 +02:00
|
|
|
// Verifies that table properties record the names of the configured
// comparator, merge operator, prefix extractor, property collectors, filter
// policy and compression type — first with defaults, then with each option
// explicitly set.
TEST_F(BlockBasedTableTest, BlockBasedTableProperties2) {
  TableConstructor c(&reverse_key_comparator);
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;

  {
    // Default configuration: only compression is pinned down.
    Options options;
    options.compression = CompressionType::kNoCompression;
    BlockBasedTableOptions table_options;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));

    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);

    auto& props = *c.GetTableReader()->GetTableProperties();

    // Default comparator
    ASSERT_EQ("leveldb.BytewiseComparator", props.comparator_name);
    // No merge operator
    ASSERT_EQ("nullptr", props.merge_operator_name);
    // No prefix extractor
    ASSERT_EQ("nullptr", props.prefix_extractor_name);
    // No property collectors
    ASSERT_EQ("[]", props.property_collectors_names);
    // No filter policy is used
    ASSERT_EQ("", props.filter_policy_name);
    // Compression type == that set:
    ASSERT_EQ("NoCompression", props.compression_name);
    c.ResetTableReader();
  }

  {
    // Explicit configuration: every recorded name should reflect the value
    // set here.
    Options options;
    BlockBasedTableOptions table_options;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    options.comparator = &reverse_key_comparator;
    options.merge_operator = MergeOperators::CreateUInt64AddOperator();
    options.prefix_extractor.reset(NewNoopTransform());
    options.table_properties_collector_factories.emplace_back(
        new DummyPropertiesCollectorFactory1());
    options.table_properties_collector_factories.emplace_back(
        new DummyPropertiesCollectorFactory2());

    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);

    auto& props = *c.GetTableReader()->GetTableProperties();

    ASSERT_EQ("rocksdb.ReverseBytewiseComparator", props.comparator_name);
    ASSERT_EQ("UInt64AddOperator", props.merge_operator_name);
    ASSERT_EQ("rocksdb.Noop", props.prefix_extractor_name);
    ASSERT_EQ("[DummyPropertiesCollector1,DummyPropertiesCollector2]",
              props.property_collectors_names);
    ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used
    c.ResetTableReader();
  }
}
|
|
|
|
|
2016-08-20 00:10:31 +02:00
|
|
|
// Writes two range tombstones into a table and verifies they can be read
// back through NewRangeTombstoneIterator — including after the table reader
// itself has been destroyed, since the iterator pins its metablock.
TEST_F(BlockBasedTableTest, RangeDelBlock) {
  TableConstructor c(BytewiseComparator());
  std::vector<std::string> keys = {"1pika", "2chu"};
  std::vector<std::string> vals = {"p", "c"};

  // Serialize each tombstone [keys[i], vals[i]) @ seqno i into the table.
  for (int i = 0; i < 2; i++) {
    RangeTombstone t(keys[i], vals[i], i);
    std::pair<InternalKey, Slice> p = t.Serialize();
    c.Add(p.first.Encode().ToString(), p.second);
  }

  std::vector<std::string> sorted_keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  std::unique_ptr<InternalKeyComparator> internal_cmp(
      new InternalKeyComparator(options.comparator));
  c.Finish(options, ioptions, table_options, *internal_cmp, &sorted_keys,
           &kvmap);

  for (int j = 0; j < 2; ++j) {
    std::unique_ptr<InternalIterator> iter(
        c.GetTableReader()->NewRangeTombstoneIterator(ReadOptions()));
    if (j > 0) {
      // For second iteration, delete the table reader object and verify the
      // iterator can still access its metablock's range tombstones.
      c.ResetTableReader();
    }
    ASSERT_FALSE(iter->Valid());
    iter->SeekToFirst();
    ASSERT_TRUE(iter->Valid());
    // Decode each tombstone back and check start key, end key and seqno.
    for (int i = 0; i < 2; i++) {
      ASSERT_TRUE(iter->Valid());
      ParsedInternalKey parsed_key;
      ASSERT_TRUE(ParseInternalKey(iter->key(), &parsed_key));
      RangeTombstone t(parsed_key, iter->value());
      ASSERT_EQ(t.start_key_, keys[i]);
      ASSERT_EQ(t.end_key_, vals[i]);
      ASSERT_EQ(t.seq_, i);
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Verifies that configuring a Bloom filter records the built-in filter
// policy's name in the table properties.
TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) {
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  c.Add("a1", "val1");

  Options options;
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);

  auto& table_props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ("rocksdb.BuiltinBloomFilter", table_props.filter_policy_name);
  c.ResetTableReader();
}
|
|
|
|
|
2015-03-03 02:07:03 +01:00
|
|
|
//
|
|
|
|
// BlockBasedTableTest::PrefetchTest
|
|
|
|
//
|
|
|
|
void AssertKeysInCache(BlockBasedTable* table_reader,
|
2015-09-02 22:58:22 +02:00
|
|
|
const std::vector<std::string>& keys_in_cache,
|
2016-08-20 00:10:31 +02:00
|
|
|
const std::vector<std::string>& keys_not_in_cache,
|
|
|
|
bool convert = false) {
|
|
|
|
if (convert) {
|
|
|
|
for (auto key : keys_in_cache) {
|
|
|
|
InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
|
|
|
|
ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
|
|
|
|
}
|
|
|
|
for (auto key : keys_not_in_cache) {
|
|
|
|
InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
|
|
|
|
ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (auto key : keys_in_cache) {
|
|
|
|
ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
|
|
|
|
}
|
|
|
|
for (auto key : keys_not_in_cache) {
|
|
|
|
ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
|
|
|
|
}
|
2015-03-03 02:07:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reopens the table with a fresh block cache, issues Prefetch() over
// [key_begin, key_end] (nullptr means open-ended), checks the returned
// status code against `expected_status`, and asserts which keys landed in
// the cache via AssertKeysInCache.
void PrefetchRange(TableConstructor* c, Options* opt,
                   BlockBasedTableOptions* table_options, const char* key_begin,
                   const char* key_end,
                   const std::vector<std::string>& keys_in_cache,
                   const std::vector<std::string>& keys_not_in_cache,
                   const Status expected_status = Status::OK()) {
  // reset the cache and reopen the table
  table_options->block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt->table_factory.reset(NewBlockBasedTableFactory(*table_options));
  const ImmutableCFOptions ioptions2(*opt);
  ASSERT_OK(c->Reopen(ioptions2));

  // prefetch
  auto* table_reader = dynamic_cast<BlockBasedTable*>(c->GetTableReader());
  Status s;
  unique_ptr<Slice> begin, end;
  unique_ptr<InternalKey> i_begin, i_end;
  // Translate user keys to internal keys when the constructor stores
  // internal keys; the Slices must outlive the Prefetch call, hence the
  // unique_ptr holders.
  if (key_begin != nullptr) {
    if (c->ConvertToInternalKey()) {
      i_begin.reset(new InternalKey(key_begin, kMaxSequenceNumber, kTypeValue));
      begin.reset(new Slice(i_begin->Encode()));
    } else {
      begin.reset(new Slice(key_begin));
    }
  }
  if (key_end != nullptr) {
    if (c->ConvertToInternalKey()) {
      i_end.reset(new InternalKey(key_end, kMaxSequenceNumber, kTypeValue));
      end.reset(new Slice(i_end->Encode()));
    } else {
      end.reset(new Slice(key_end));
    }
  }
  s = table_reader->Prefetch(begin.get(), end.get());

  // Only the status code is compared, not the message.
  ASSERT_TRUE(s.code() == expected_status.code());

  // assert our expectation in cache warmup
  AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache,
                    c->ConvertToInternalKey());
  c->ResetTableReader();
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, PrefetchTest) {
  // The purpose of this test is to test the prefetching operation built into
  // BlockBasedTable.
  Options opt;
  unique_ptr<InternalKeyComparator> ikc;
  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
  opt.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  // big enough so we don't ever lose cached values.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(opt);
  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
  c.ResetTableReader();

  // We get the following data spread :
  //
  // Data block Index
  // ========================
  // [ k01 k02 k03 ] k03
  // [ k04 ] k04
  // [ k05 ] k05
  // [ k06 k07 ] k07
  //
  //
  // Simple
  PrefetchRange(&c, &opt, &table_options,
                /*key_range=*/"k01", "k05",
                /*keys_in_cache=*/{"k01", "k02", "k03", "k04", "k05"},
                /*keys_not_in_cache=*/{"k06", "k07"});
  PrefetchRange(&c, &opt, &table_options, "k01", "k01", {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // odd
  PrefetchRange(&c, &opt, &table_options, "a", "z",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k00", "k00", {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // Edge cases
  PrefetchRange(&c, &opt, &table_options, "k00", "k06",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k00", "zzz",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  // null keys
  PrefetchRange(&c, &opt, &table_options, nullptr, nullptr,
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k04", nullptr,
                {"k04", "k05", "k06", "k07"}, {"k01", "k02", "k03"});
  PrefetchRange(&c, &opt, &table_options, nullptr, "k05",
                {"k01", "k02", "k03", "k04", "k05"}, {"k06", "k07"});
  // invalid
  // NOTE(review): the slices in the expected status ("k06 ", "k07") don't
  // match the requested range ("k06", "k00"); this is harmless because
  // PrefetchRange compares only the status code, not the message.
  PrefetchRange(&c, &opt, &table_options, "k06", "k00", {}, {},
                Status::InvalidArgument(Slice("k06 "), Slice("k07")));
  c.ResetTableReader();
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Verifies that total_order_seek returns keys in global byte order for every
// index type: binary search, the hash-search variants, and two-level index.
TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
  BlockBasedTableOptions table_options;
  // NOTE(review): the loop bound used to be `i < 4`, which left `case 4`
  // (two-level index) unreachable; bumped to `i < 5` so that configuration
  // is actually exercised.
  for (int i = 0; i < 5; ++i) {
    Options options;
    // Make each key/value an individual block
    table_options.block_size = 64;
    switch (i) {
    case 0:
      // Binary search index
      table_options.index_type = BlockBasedTableOptions::kBinarySearch;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      break;
    case 1:
      // Hash search index
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    case 2:
      // Hash search index with hash_index_allow_collision
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      table_options.hash_index_allow_collision = true;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    case 3:
      // Hash search index with filter policy
      table_options.index_type = BlockBasedTableOptions::kHashSearch;
      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
      break;
    case 4:
    default:
      // Two-level (partitioned) index
      table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      break;
    }

    TableConstructor c(BytewiseComparator(),
                       true /* convert_to_internal_key_ */);
    // NOTE(review): std::string('a', 56) has the (count, char) arguments
    // swapped — it builds 97 ('a') copies of char 56 ('8'), not 56 copies of
    // 'a'. Kept as-is because the num_data_blocks assertion below depends on
    // the resulting value size; confirm intent before "fixing".
    c.Add("aaaa1", std::string('a', 56));
    c.Add("bbaa1", std::string('a', 56));
    c.Add("cccc1", std::string('a', 56));
    c.Add("bbbb1", std::string('a', 56));
    c.Add("baaa1", std::string('a', 56));
    c.Add("abbb1", std::string('a', 56));
    c.Add("cccc2", std::string('a', 56));
    std::vector<std::string> keys;
    stl_wrappers::KVMap kvmap;
    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
    auto props = c.GetTableReader()->GetTableProperties();
    // Values are large relative to block_size, so each kv lands in its own
    // data block.
    ASSERT_EQ(7u, props->num_data_blocks);
    auto* reader = c.GetTableReader();
    ReadOptions ro;
    ro.total_order_seek = true;
    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));

    // Seek to a key that is a strict prefix of several stored keys; the
    // iterator must land on the smallest key >= target in byte order.
    iter->Seek(InternalKey("b", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("baaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bbb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("cccc1", ExtractUserKey(iter->key()).ToString());
  }
}
|
|
|
|
|
2016-01-06 20:46:32 +01:00
|
|
|
// Regression test: with a no-op prefix transform plus a bloom filter, Seek
// must still find an existing key whether or not total_order_seek is set.
TEST_F(BlockBasedTableTest, NoopTransformSeek) {
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));

  Options options;
  options.comparator = BytewiseComparator();
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewNoopTransform());

  TableConstructor c(options.comparator);
  // To tickle the PrefixMayMatch bug it is important that the
  // user-key is a single byte so that the index key exactly matches
  // the user-key.
  InternalKey key("a", 1, kTypeValue);
  c.Add(key.Encode().ToString(), "b");
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  const InternalKeyComparator internal_comparator(options.comparator);
  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
           &kvmap);

  auto* reader = c.GetTableReader();
  // i == 0: total-order seek; i == 1: prefix seek. Both must find the key.
  for (int i = 0; i < 2; ++i) {
    ReadOptions ro;
    ro.total_order_seek = (i == 0);
    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));

    iter->Seek(key.Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("a", ExtractUserKey(iter->key()).ToString());
  }
}
|
|
|
|
|
2016-08-26 20:46:32 +02:00
|
|
|
TEST_F(BlockBasedTableTest, SkipPrefixBloomFilter) {
  // if DB is opened with a prefix extractor of a different name,
  // prefix bloom is skipped when read the file
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(2));
  table_options.whole_key_filtering = false;

  Options options;
  options.comparator = BytewiseComparator();
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  // Build the file with a length-1 prefix extractor...
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));

  TableConstructor c(options.comparator);
  InternalKey key("abcdefghijk", 1, kTypeValue);
  c.Add(key.Encode().ToString(), "test");
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  const InternalKeyComparator internal_comparator(options.comparator);
  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
           &kvmap);
  // ...then reopen it with a length-9 extractor: the persisted prefix bloom
  // no longer matches and must be ignored rather than mis-applied.
  options.prefix_extractor.reset(NewFixedPrefixTransform(9));
  const ImmutableCFOptions new_ioptions(options);
  c.Reopen(new_ioptions);
  auto reader = c.GetTableReader();
  std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(ReadOptions()));

  // Test point lookup
  // only one kv
  for (auto& kv : kvmap) {
    db_iter->Seek(kv.first);
    ASSERT_TRUE(db_iter->Valid());
    ASSERT_OK(db_iter->status());
    ASSERT_EQ(db_iter->key(), kv.first);
    ASSERT_EQ(db_iter->value(), kv.second);
  }
}
|
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// Returns a random string of exactly `len` bytes drawn from `rnd`.
// Convenience wrapper over test::RandomString's out-parameter API.
static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}
|
|
|
|
|
2014-09-26 18:14:05 +02:00
|
|
|
void AddInternalKey(TableConstructor* c, const std::string& prefix,
|
2018-03-05 22:08:17 +01:00
|
|
|
int /*suffix_len*/ = 800) {
|
2014-04-10 23:19:43 +02:00
|
|
|
static Random rnd(1023);
|
|
|
|
InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
|
|
|
|
c->Add(k.Encode().ToString(), "v");
|
|
|
|
}
|
|
|
|
|
2017-02-07 01:29:29 +01:00
|
|
|
// Shared driver for the index-type tests: builds a table with prefixed keys
// and checks Seek semantics (lower bound, exact match, upper bound, and
// non-existent prefixes) through the table reader's iterator.
void TableTest::IndexTest(BlockBasedTableOptions table_options) {
  TableConstructor c(BytewiseComparator());

  // keys with prefix length 3, make sure the key/value is big enough to fill
  // one block
  AddInternalKey(&c, "0015");
  AddInternalKey(&c, "0035");

  AddInternalKey(&c, "0054");
  AddInternalKey(&c, "0055");

  AddInternalKey(&c, "0056");
  AddInternalKey(&c, "0057");

  AddInternalKey(&c, "0058");
  AddInternalKey(&c, "0075");

  AddInternalKey(&c, "0076");
  AddInternalKey(&c, "0095");

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
  table_options.block_size = 1700;
  table_options.block_cache = NewLRUCache(1024, 4);
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  std::unique_ptr<InternalKeyComparator> comparator(
      new InternalKeyComparator(BytewiseComparator()));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
  auto reader = c.GetTableReader();

  auto props = reader->GetTableProperties();
  // With ~800-byte values and a 1700-byte block size the ten keys pack into
  // exactly five data blocks.
  ASSERT_EQ(5u, props->num_data_blocks);

  std::unique_ptr<InternalIterator> index_iter(
      reader->NewIterator(ReadOptions()));

  // -- Find keys do not exist, but have common prefix.
  std::vector<std::string> prefixes = {"001", "003", "005", "007", "009"};
  std::vector<std::string> lower_bound = {keys[0], keys[1], keys[2],
                                          keys[7], keys[9], };

  // find the lower bound of the prefix
  for (size_t i = 0; i < prefixes.size(); ++i) {
    index_iter->Seek(InternalKey(prefixes[i], 0, kTypeValue).Encode());
    ASSERT_OK(index_iter->status());
    ASSERT_TRUE(index_iter->Valid());

    // seek the first element in the block
    ASSERT_EQ(lower_bound[i], index_iter->key().ToString());
    ASSERT_EQ("v", index_iter->value().ToString());
  }

  // find the upper bound of prefixes
  std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };

  // find existing keys
  for (const auto& item : kvmap) {
    auto ukey = ExtractUserKey(item.first).ToString();
    index_iter->Seek(ukey);

    // ASSERT_OK(regular_iter->status());
    ASSERT_OK(index_iter->status());

    // ASSERT_TRUE(regular_iter->Valid());
    ASSERT_TRUE(index_iter->Valid());

    ASSERT_EQ(item.first, index_iter->key().ToString());
    ASSERT_EQ(item.second, index_iter->value().ToString());
  }

  for (size_t i = 0; i < prefixes.size(); ++i) {
    // the key is greater than any existing keys.
    auto key = prefixes[i] + "9";
    index_iter->Seek(InternalKey(key, 0, kTypeValue).Encode());

    ASSERT_OK(index_iter->status());
    if (i == prefixes.size() - 1) {
      // last key
      ASSERT_TRUE(!index_iter->Valid());
    } else {
      ASSERT_TRUE(index_iter->Valid());
      // seek the first element in the block
      ASSERT_EQ(upper_bound[i], index_iter->key().ToString());
      ASSERT_EQ("v", index_iter->value().ToString());
    }
  }

  // find keys with prefix that don't match any of the existing prefixes.
  std::vector<std::string> non_exist_prefixes = {"002", "004", "006", "008"};
  for (const auto& prefix : non_exist_prefixes) {
    index_iter->Seek(InternalKey(prefix, 0, kTypeValue).Encode());
    // regular_iter->Seek(prefix);

    ASSERT_OK(index_iter->status());
    // Seek to non-existing prefixes should yield either invalid, or a
    // key with prefix greater than the target.
    if (index_iter->Valid()) {
      Slice ukey = ExtractUserKey(index_iter->key());
      Slice ukey_prefix = options.prefix_extractor->Transform(ukey);
      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0);
    }
  }
  c.ResetTableReader();
}
|
|
|
|
|
2017-02-07 01:29:29 +01:00
|
|
|
// Runs the shared index test suite with the binary-search index.
TEST_F(TableTest, BinaryIndexTest) {
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
  IndexTest(table_options);
}
|
|
|
|
|
|
|
|
// Runs the shared index test suite with the hash-search index.
TEST_F(TableTest, HashIndexTest) {
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  IndexTest(table_options);
}
|
|
|
|
|
|
|
|
// Runs the shared index test suite with the two-level (partitioned) index,
// sweeping metadata_block_size from 1 byte up to just past the estimated
// total index size so every partition-count from "one key per partition" to
// "single partition" is exercised.
TEST_F(TableTest, PartitionIndexTest) {
  const int max_index_keys = 5;
  const int est_max_index_key_value_size = 32;
  const int est_max_index_size = max_index_keys * est_max_index_key_value_size;
  for (int i = 1; i <= est_max_index_size + 1; i++) {
    BlockBasedTableOptions table_options;
    table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
    table_options.metadata_block_size = i;
    IndexTest(table_options);
  }
}
|
|
|
|
|
2013-10-10 20:43:24 +02:00
|
|
|
// It's very hard to figure out the index block size of a block accurately.
|
|
|
|
// To make sure we get the index size, we just make sure as key number
|
|
|
|
// grows, the filter block size also grows.
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, IndexSizeStat) {
|
2013-10-10 20:43:24 +02:00
|
|
|
uint64_t last_index_size = 0;
|
|
|
|
|
|
|
|
// we need to use random keys since the pure human readable texts
|
|
|
|
// may be well compressed, resulting insignifcant change of index
|
|
|
|
// block size.
|
|
|
|
Random rnd(test::RandomSeed());
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
|
|
|
|
for (int i = 0; i < 100; ++i) {
|
|
|
|
keys.push_back(RandomString(&rnd, 10000));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Each time we load one more key to the table. the table index block
|
|
|
|
// size is expected to be larger than last time's.
|
|
|
|
for (size_t i = 1; i < keys.size(); ++i) {
|
2016-08-20 00:10:31 +02:00
|
|
|
TableConstructor c(BytewiseComparator(),
|
|
|
|
true /* convert_to_internal_key_ */);
|
2013-10-10 20:43:24 +02:00
|
|
|
for (size_t j = 0; j < i; ++j) {
|
|
|
|
c.Add(keys[j], "val");
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<std::string> ks;
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2013-11-20 07:00:48 +01:00
|
|
|
Options options;
|
2013-10-10 20:43:24 +02:00
|
|
|
options.compression = kNoCompression;
|
2014-08-25 23:22:05 +02:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_restart_interval = 1;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2013-10-10 20:43:24 +02:00
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 23:22:05 +02:00
|
|
|
GetPlainInternalComparator(options.comparator), &ks, &kvmap);
|
2014-08-26 01:14:30 +02:00
|
|
|
auto index_size = c.GetTableReader()->GetTableProperties()->index_size;
|
2013-10-10 20:43:24 +02:00
|
|
|
ASSERT_GT(index_size, last_index_size);
|
|
|
|
last_index_size = index_size;
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
c.ResetTableReader();
|
2013-10-10 20:43:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Checks that the num_data_blocks table property matches the number of
// key/value pairs when each pair is sized to fill roughly one block.
TEST_F(BlockBasedTableTest, NumBlockStat) {
  Random rnd(test::RandomSeed());
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  table_options.block_size = 1000;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  for (int i = 0; i < 10; ++i) {
    // the key/val are slightly smaller than block size, so that each block
    // holds roughly one key/value pair.
    c.Add(RandomString(&rnd, 900), "val");
  }

  std::vector<std::string> ks;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &ks, &kvmap);
  ASSERT_EQ(kvmap.size(),
            c.GetTableReader()->GetTableProperties()->num_data_blocks);
  c.ResetTableReader();
}
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
// A simple tool that takes the snapshot of block cache statistics.
|
|
|
|
class BlockCachePropertiesSnapshot {
|
2013-11-13 07:46:51 +01:00
|
|
|
public:
|
2014-02-20 00:38:57 +01:00
|
|
|
explicit BlockCachePropertiesSnapshot(Statistics* statistics) {
|
2014-01-17 21:46:06 +01:00
|
|
|
block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS);
|
|
|
|
block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT);
|
|
|
|
index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
|
|
|
|
index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
|
|
|
|
data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
|
|
|
|
data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
|
2014-02-20 00:38:57 +01:00
|
|
|
filter_block_cache_miss =
|
|
|
|
statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS);
|
|
|
|
filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
|
2015-10-08 00:17:20 +02:00
|
|
|
block_cache_bytes_read = statistics->getTickerCount(BLOCK_CACHE_BYTES_READ);
|
|
|
|
block_cache_bytes_write =
|
|
|
|
statistics->getTickerCount(BLOCK_CACHE_BYTES_WRITE);
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
|
|
|
|
int64_t expected_index_block_cache_hit) {
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
|
|
|
|
int64_t expected_filter_block_cache_hit) {
|
|
|
|
ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
2013-11-20 01:29:42 +01:00
|
|
|
// Check if the fetched props matches the expected ones.
|
2014-02-20 00:38:57 +01:00
|
|
|
// TODO(kailiu) Use this only when you disabled filter policy!
|
2014-10-31 19:59:54 +01:00
|
|
|
void AssertEqual(int64_t expected_index_block_cache_miss,
|
|
|
|
int64_t expected_index_block_cache_hit,
|
|
|
|
int64_t expected_data_block_cache_miss,
|
|
|
|
int64_t expected_data_block_cache_hit) const {
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
|
|
|
|
ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
|
|
|
|
block_cache_miss);
|
|
|
|
ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
|
|
|
|
block_cache_hit);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
2015-10-08 00:17:20 +02:00
|
|
|
int64_t GetCacheBytesRead() { return block_cache_bytes_read; }
|
|
|
|
|
|
|
|
int64_t GetCacheBytesWrite() { return block_cache_bytes_write; }
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
private:
|
2014-02-05 01:21:47 +01:00
|
|
|
int64_t block_cache_miss = 0;
|
|
|
|
int64_t block_cache_hit = 0;
|
|
|
|
int64_t index_block_cache_miss = 0;
|
|
|
|
int64_t index_block_cache_hit = 0;
|
|
|
|
int64_t data_block_cache_miss = 0;
|
|
|
|
int64_t data_block_cache_hit = 0;
|
2014-02-20 00:38:57 +01:00
|
|
|
int64_t filter_block_cache_miss = 0;
|
|
|
|
int64_t filter_block_cache_hit = 0;
|
2015-10-08 00:17:20 +02:00
|
|
|
int64_t block_cache_bytes_read = 0;
|
|
|
|
int64_t block_cache_bytes_write = 0;
|
2013-11-13 07:46:51 +01:00
|
|
|
};
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
// Make sure, by default, index/filter blocks were pre-loaded (meaning we won't
|
|
|
|
// use block cache to store them).
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) {
|
2014-02-20 00:38:57 +01:00
|
|
|
Options options;
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.statistics = CreateDBStatistics();
|
|
|
|
BlockBasedTableOptions table_options;
|
2016-04-07 22:51:47 +02:00
|
|
|
table_options.block_cache = NewLRUCache(1024, 4);
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
|
2014-02-20 00:38:57 +01:00
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2014-02-20 00:38:57 +01:00
|
|
|
|
2016-08-20 00:10:31 +02:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2014-02-20 00:38:57 +01:00
|
|
|
c.Add("key", "value");
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 23:22:05 +02:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
2014-02-20 00:38:57 +01:00
|
|
|
|
|
|
|
// preloading filter/index blocks is enabled.
|
2014-08-26 01:14:30 +02:00
|
|
|
auto reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-02-20 00:38:57 +01:00
|
|
|
ASSERT_TRUE(reader->TEST_filter_block_preloaded());
|
2014-03-01 03:19:07 +01:00
|
|
|
ASSERT_TRUE(reader->TEST_index_reader_preloaded());
|
2014-02-20 00:38:57 +01:00
|
|
|
|
|
|
|
{
|
|
|
|
// nothing happens in the beginning
|
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertIndexBlockStat(0, 0);
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
2014-09-29 20:09:09 +02:00
|
|
|
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
|
2015-03-03 19:59:36 +01:00
|
|
|
GetContext::kNotFound, Slice(), nullptr, nullptr,
|
2016-11-04 02:40:23 +01:00
|
|
|
nullptr, nullptr, nullptr);
|
2014-02-20 00:38:57 +01:00
|
|
|
// a hack that just to trigger BlockBasedTable::GetFilter.
|
2014-09-29 20:09:09 +02:00
|
|
|
reader->Get(ReadOptions(), "non-exist-key", &get_context);
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertIndexBlockStat(0, 0);
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Due to the difficulities of the intersaction between statistics, this test
|
|
|
|
// only tests the case when "index block is put to block cache"
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
|
2013-11-13 07:46:51 +01:00
|
|
|
// -- Table construction
|
2013-11-20 07:00:48 +01:00
|
|
|
Options options;
|
2013-11-13 07:46:51 +01:00
|
|
|
options.create_if_missing = true;
|
2017-05-02 22:39:09 +02:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-01-24 19:57:15 +01:00
|
|
|
|
|
|
|
// Enable the cache for index/filter blocks
|
|
|
|
BlockBasedTableOptions table_options;
|
2016-04-07 22:51:47 +02:00
|
|
|
table_options.block_cache = NewLRUCache(1024, 4);
|
2014-01-24 19:57:15 +01:00
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2013-11-13 07:46:51 +01:00
|
|
|
std::vector<std::string> keys;
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2016-08-20 00:10:31 +02:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2013-11-13 07:46:51 +01:00
|
|
|
c.Add("key", "value");
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 23:22:05 +02:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
2014-02-20 00:38:57 +01:00
|
|
|
// preloading filter/index blocks is prohibited.
|
2014-10-22 20:52:35 +02:00
|
|
|
auto* reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-02-20 00:38:57 +01:00
|
|
|
ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
|
2014-03-01 03:19:07 +01:00
|
|
|
ASSERT_TRUE(!reader->TEST_index_reader_preloaded());
|
2013-11-13 07:46:51 +01:00
|
|
|
|
|
|
|
// -- PART 1: Open with regular block cache.
|
|
|
|
// Since block_cache is disabled, no cache activities will be involved.
|
2015-10-13 00:06:38 +02:00
|
|
|
unique_ptr<InternalIterator> iter;
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2015-10-08 00:17:20 +02:00
|
|
|
int64_t last_cache_bytes_read = 0;
|
2013-11-13 07:46:51 +01:00
|
|
|
// At first, no block will be accessed.
|
|
|
|
{
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2013-11-13 07:46:51 +01:00
|
|
|
// index will be added to block cache.
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1, // index block miss
|
|
|
|
0, 0, 0);
|
2015-10-08 00:17:20 +02:00
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Only index block will be accessed
|
|
|
|
{
|
|
|
|
iter.reset(c.NewIterator());
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2013-11-13 07:46:51 +01:00
|
|
|
// NOTE: to help better highlight the "detla" of each ticker, I use
|
|
|
|
// <last_value> + <added_value> to indicate the increment of changed
|
|
|
|
// value; other numbers remain the same.
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1, 0 + 1, // index block hit
|
|
|
|
0, 0);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache hit, bytes read from cache should increase
|
|
|
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Only data block will be accessed
|
|
|
|
{
|
|
|
|
iter->SeekToFirst();
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1, 1, 0 + 1, // data block miss
|
|
|
|
0);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Data block will be in cache
|
|
|
|
{
|
|
|
|
iter.reset(c.NewIterator());
|
|
|
|
iter->SeekToFirst();
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1, 1 + 1, /* index block hit */
|
|
|
|
1, 0 + 1 /* data block hit */);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache hit, bytes read from cache should increase
|
|
|
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
// release the iterator so that the block cache can reset correctly.
|
|
|
|
iter.reset();
|
2017-05-02 22:39:09 +02:00
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
c.ResetTableReader();
|
|
|
|
|
2014-10-22 20:52:35 +02:00
|
|
|
// -- PART 2: Open with very small block cache
|
2013-11-13 07:46:51 +01:00
|
|
|
// In this test, no block will ever get hit since the block cache is
|
|
|
|
// too small to fit even one entry.
|
2016-04-07 22:51:47 +02:00
|
|
|
table_options.block_cache = NewLRUCache(1, 4);
|
2017-05-02 22:39:09 +02:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-08-25 23:22:05 +02:00
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2014-09-05 01:18:36 +02:00
|
|
|
const ImmutableCFOptions ioptions2(options);
|
|
|
|
c.Reopen(ioptions2);
|
2013-11-13 07:46:51 +01:00
|
|
|
{
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1, // index block miss
|
|
|
|
0, 0, 0);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Both index and data block get accessed.
|
|
|
|
// It first cache index block then data block. But since the cache size
|
|
|
|
// is only 1, index block will be purged after data block is inserted.
|
|
|
|
iter.reset(c.NewIterator());
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(1 + 1, // index block miss
|
|
|
|
0, 0, // data block miss
|
|
|
|
0);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache hit, bytes read from cache should increase
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// SeekToFirst() accesses data block. With similar reason, we expect data
|
|
|
|
// block's cache miss.
|
|
|
|
iter->SeekToFirst();
|
2014-02-20 00:38:57 +01:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 01:21:47 +01:00
|
|
|
props.AssertEqual(2, 0, 0 + 1, // data block miss
|
|
|
|
0);
|
2015-10-08 00:17:20 +02:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2014-10-22 20:52:35 +02:00
|
|
|
iter.reset();
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
c.ResetTableReader();
|
2014-10-22 20:52:35 +02:00
|
|
|
|
|
|
|
// -- PART 3: Open table with bloom filter enabled but not in SST file
|
2016-04-07 22:51:47 +02:00
|
|
|
table_options.block_cache = NewLRUCache(4096, 4);
|
2014-10-22 20:52:35 +02:00
|
|
|
table_options.cache_index_and_filter_blocks = false;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
|
|
|
|
TableConstructor c3(BytewiseComparator());
|
2014-10-22 22:53:35 +02:00
|
|
|
std::string user_key = "k01";
|
|
|
|
InternalKey internal_key(user_key, 0, kTypeValue);
|
|
|
|
c3.Add(internal_key.Encode().ToString(), "hello");
|
2014-10-22 20:52:35 +02:00
|
|
|
ImmutableCFOptions ioptions3(options);
|
|
|
|
// Generate table without filter policy
|
|
|
|
c3.Finish(options, ioptions3, table_options,
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
|
|
|
c3.ResetTableReader();
|
|
|
|
|
2014-10-22 20:52:35 +02:00
|
|
|
// Open table with filter policy
|
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(1));
|
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2017-05-02 22:39:09 +02:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-10-22 20:52:35 +02:00
|
|
|
ImmutableCFOptions ioptions4(options);
|
|
|
|
ASSERT_OK(c3.Reopen(ioptions4));
|
|
|
|
reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader());
|
|
|
|
ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
|
2017-03-13 19:44:50 +01:00
|
|
|
PinnableSlice value;
|
2014-10-22 20:52:35 +02:00
|
|
|
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
|
2015-03-03 19:59:36 +01:00
|
|
|
GetContext::kNotFound, user_key, &value, nullptr,
|
2016-11-04 02:40:23 +01:00
|
|
|
nullptr, nullptr, nullptr);
|
2014-10-22 22:53:35 +02:00
|
|
|
ASSERT_OK(reader->Get(ReadOptions(), user_key, &get_context));
|
2017-03-13 19:44:50 +01:00
|
|
|
ASSERT_STREQ(value.data(), "hello");
|
2014-10-22 20:52:35 +02:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
c3.ResetTableReader();
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
2016-01-04 19:51:00 +01:00
|
|
|
void ValidateBlockSizeDeviation(int value, int expected) {
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size_deviation = value;
|
|
|
|
BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
|
|
|
|
|
|
|
|
const BlockBasedTableOptions* normalized_table_options =
|
|
|
|
(const BlockBasedTableOptions*)factory->GetOptions();
|
|
|
|
ASSERT_EQ(normalized_table_options->block_size_deviation, expected);
|
|
|
|
|
|
|
|
delete factory;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ValidateBlockRestartInterval(int value, int expected) {
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_restart_interval = value;
|
|
|
|
BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
|
|
|
|
|
|
|
|
const BlockBasedTableOptions* normalized_table_options =
|
|
|
|
(const BlockBasedTableOptions*)factory->GetOptions();
|
|
|
|
ASSERT_EQ(normalized_table_options->block_restart_interval, expected);
|
|
|
|
|
|
|
|
delete factory;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Out-of-range BlockBasedTableOptions values must be silently normalized by
// the factory rather than rejected; each case is (input, expected-after-
// sanitization).
TEST_F(BlockBasedTableTest, InvalidOptions) {
  struct SanitizeCase {
    int input;
    int expected;
  };

  // invalid values for block_size_deviation (<0 or >100) are silently set to 0
  const SanitizeCase deviation_cases[] = {{-10, 0}, {-1, 0},    {0, 0},
                                          {1, 1},   {99, 99},   {100, 100},
                                          {101, 0}, {1000, 0}};
  for (const SanitizeCase& tc : deviation_cases) {
    ValidateBlockSizeDeviation(tc.input, tc.expected);
  }

  // invalid values for block_restart_interval (<1) are silently set to 1
  const SanitizeCase restart_cases[] = {{-10, 1}, {-1, 1}, {0, 1},
                                        {1, 1},   {2, 2},  {1000, 1000}};
  for (const SanitizeCase& tc : restart_cases) {
    ValidateBlockRestartInterval(tc.input, tc.expected);
  }
}
|
|
|
|
|
2015-09-03 00:36:47 +02:00
|
|
|
// Verifies, via perf_context->block_read_count, exactly how many blocks are
// read from the file for a point lookup, across the four combinations of
// filter type (block-based vs full) and whether index/filter blocks live in
// the block cache. The 1-byte block cache guarantees nothing stays cached, so
// every cache-resident block costs a read.
TEST_F(BlockBasedTableTest, BlockReadCountTest) {
  // bloom_filter_type = 0 -- block-based filter
  // bloom_filter_type = 1 -- full filter
  for (int bloom_filter_type = 0; bloom_filter_type < 2; ++bloom_filter_type) {
    for (int index_and_filter_in_cache = 0; index_and_filter_in_cache < 2;
         ++index_and_filter_in_cache) {
      Options options;
      options.create_if_missing = true;

      BlockBasedTableOptions table_options;
      // Tiny cache: any block placed in it must be re-read on next access.
      table_options.block_cache = NewLRUCache(1, 0);
      table_options.cache_index_and_filter_blocks = index_and_filter_in_cache;
      table_options.filter_policy.reset(
          NewBloomFilterPolicy(10, bloom_filter_type == 0));
      options.table_factory.reset(new BlockBasedTableFactory(table_options));
      std::vector<std::string> keys;
      stl_wrappers::KVMap kvmap;

      TableConstructor c(BytewiseComparator());
      std::string user_key = "k04";
      InternalKey internal_key(user_key, 0, kTypeValue);
      std::string encoded_key = internal_key.Encode().ToString();
      c.Add(encoded_key, "hello");
      ImmutableCFOptions ioptions(options);
      // Generate table with filter policy
      c.Finish(options, ioptions, table_options,
               GetPlainInternalComparator(options.comparator), &keys, &kvmap);
      auto reader = c.GetTableReader();
      PinnableSlice value;
      GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
                             GetContext::kNotFound, user_key, &value, nullptr,
                             nullptr, nullptr, nullptr);
      // Reset the counter so block_read_count reflects only this Get().
      get_perf_context()->Reset();
      ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
      if (index_and_filter_in_cache) {
        // data, index and filter block
        ASSERT_EQ(get_perf_context()->block_read_count, 3);
      } else {
        // just the data block
        ASSERT_EQ(get_perf_context()->block_read_count, 1);
      }
      ASSERT_EQ(get_context.State(), GetContext::kFound);
      ASSERT_STREQ(value.data(), "hello");

      // Get non-existing key
      user_key = "does-not-exist";
      internal_key = InternalKey(user_key, 0, kTypeValue);
      encoded_key = internal_key.Encode().ToString();

      value.Reset();
      get_context = GetContext(options.comparator, nullptr, nullptr, nullptr,
                               GetContext::kNotFound, user_key, &value, nullptr,
                               nullptr, nullptr, nullptr);
      get_perf_context()->Reset();
      ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
      ASSERT_EQ(get_context.State(), GetContext::kNotFound);

      if (index_and_filter_in_cache) {
        if (bloom_filter_type == 0) {
          // with block-based, we read index and then the filter
          ASSERT_EQ(get_perf_context()->block_read_count, 2);
        } else {
          // with full-filter, we read filter first and then we stop
          ASSERT_EQ(get_perf_context()->block_read_count, 1);
        }
      } else {
        // filter is already in memory and it figures out that the key doesn't
        // exist
        ASSERT_EQ(get_perf_context()->block_read_count, 0);
      }
    }
  }
}
|
|
|
|
|
2017-05-06 05:10:56 +02:00
|
|
|
// A wrapper around LRUCache that also keeps track of data blocks (in contrast
// with the objects) in the cache. The class is very simple and can be used only
// for trivial tests.
class MockCache : public LRUCache {
 public:
  MockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
            double high_pri_pool_ratio)
      : LRUCache(capacity, num_shard_bits, strict_capacity_limit,
                 high_pri_pool_ratio) {}
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) override {
    // Replace the deleter with our own so that we keep track of data blocks
    // erased from the cache
    deleters_[key.ToString()] = deleter;
    return ShardedCache::Insert(key, value, charge, &MockDeleter, handle,
                                priority);
  }
  // This is called by the application right after inserting a data block
  virtual void TEST_mark_as_data_block(const Slice& key,
                                       size_t charge) override {
    marked_data_in_cache_[key.ToString()] = charge;
    marked_size_ += charge;
  }
  using DeleterFunc = void (*)(const Slice& key, void* value);
  // NOTE(review): this bookkeeping is static (shared across MockCache
  // instances) and unsynchronized — fine for single-threaded tests only.
  // Maps cache key -> original deleter captured in Insert().
  static std::map<std::string, DeleterFunc> deleters_;
  // Maps cache key -> charge, for keys flagged as data blocks.
  static std::map<std::string, size_t> marked_data_in_cache_;
  // Running total charge of data blocks currently in the cache.
  static size_t marked_size_;
  static void MockDeleter(const Slice& key, void* value) {
    // If the item was marked for being data block, decrease its usage from the
    // total data block usage of the cache
    if (marked_data_in_cache_.find(key.ToString()) !=
        marked_data_in_cache_.end()) {
      marked_size_ -= marked_data_in_cache_[key.ToString()];
    }
    // Then call the original deleter
    assert(deleters_.find(key.ToString()) != deleters_.end());
    auto deleter = deleters_[key.ToString()];
    deleter(key, value);
  }
};

size_t MockCache::marked_size_ = 0;
std::map<std::string, MockCache::DeleterFunc> MockCache::deleters_;
std::map<std::string, size_t> MockCache::marked_data_in_cache_;
|
|
|
|
|
|
|
|
// Block cache can contain raw data blocks as well as general objects. If an
// object depends on the table to be live, it then must be destructed before
// the table is closed. This test makes sure that the only items remaining in
// the cache after the table is closed are raw data blocks.
TEST_F(BlockBasedTableTest, NoObjectInCacheAfterTableClose) {
  // Sweep the option matrix: level, index type, filter kind, filter
  // partitioning, whether index/filter go through the cache, and L0 pinning.
  // Invalid combinations are skipped with `continue` below.
  for (int level: {-1, 0, 1, 10}) {
  for (auto index_type :
       {BlockBasedTableOptions::IndexType::kBinarySearch,
        BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch}) {
    for (bool block_based_filter : {true, false}) {
      for (bool partition_filter : {true, false}) {
        // Partitioned filters require full filters + two-level index.
        if (partition_filter &&
            (block_based_filter ||
             index_type !=
                 BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch)) {
          continue;
        }
        for (bool index_and_filter_in_cache : {true, false}) {
          for (bool pin_l0 : {true, false}) {
            // Pinning only applies to cache-resident index/filter blocks.
            if (pin_l0 && !index_and_filter_in_cache) {
              continue;
            }
            // Create a table
            Options opt;
            unique_ptr<InternalKeyComparator> ikc;
            ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
            opt.compression = kNoCompression;
            BlockBasedTableOptions table_options;
            table_options.block_size = 1024;
            table_options.index_type =
                BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
            table_options.pin_l0_filter_and_index_blocks_in_cache = pin_l0;
            table_options.partition_filters = partition_filter;
            table_options.cache_index_and_filter_blocks =
                index_and_filter_in_cache;
            // big enough so we don't ever lose cached values.
            // MockCache (above) records which entries are data blocks so we
            // can assert on them after the table is closed.
            table_options.block_cache = std::shared_ptr<rocksdb::Cache>(
                new MockCache(16 * 1024 * 1024, 4, false, 0.0));
            table_options.filter_policy.reset(
                rocksdb::NewBloomFilterPolicy(10, block_based_filter));
            opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

            bool convert_to_internal_key = false;
            TableConstructor c(BytewiseComparator(), convert_to_internal_key,
                               level);
            std::string user_key = "k01";
            std::string key =
                InternalKey(user_key, 0, kTypeValue).Encode().ToString();
            c.Add(key, "hello");
            std::vector<std::string> keys;
            stl_wrappers::KVMap kvmap;
            const ImmutableCFOptions ioptions(opt);
            c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);

            // Doing a read to make index/filter loaded into the cache
            auto table_reader =
                dynamic_cast<BlockBasedTable*>(c.GetTableReader());
            PinnableSlice value;
            GetContext get_context(opt.comparator, nullptr, nullptr, nullptr,
                                   GetContext::kNotFound, user_key, &value,
                                   nullptr, nullptr, nullptr, nullptr);
            InternalKey ikey(user_key, 0, kTypeValue);
            auto s = table_reader->Get(ReadOptions(), key, &get_context);
            ASSERT_EQ(get_context.State(), GetContext::kFound);
            ASSERT_STREQ(value.data(), "hello");

            // Close the table
            c.ResetTableReader();

            auto usage = table_options.block_cache->GetUsage();
            auto pinned_usage = table_options.block_cache->GetPinnedUsage();
            // The only usage must be for marked data blocks
            ASSERT_EQ(usage, MockCache::marked_size_);
            // There must be some pinned data since PinnableSlice has not
            // released them yet
            ASSERT_GT(pinned_usage, 0);
            // Release pinnable slice resources
            value.Reset();
            pinned_usage = table_options.block_cache->GetPinnedUsage();
            ASSERT_EQ(pinned_usage, 0);
          }
        }
      }
    }
  }
  } // level
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(BlockBasedTableTest, BlockCacheLeak) {
  // Check that when we reopen a table we don't lose access to blocks already
  // in the cache. This test checks whether the Table actually makes use of the
  // unique ID from the file.

  Options opt;
  unique_ptr<InternalKeyComparator> ikc;
  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
  opt.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  // big enough so we don't ever lose cached values.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  // Mix of small and multi-block values so several data blocks are created.
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(opt);
  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);

  // Full scan to pull every data block into the block cache.
  unique_ptr<InternalIterator> iter(c.NewIterator());
  iter->SeekToFirst();
  while (iter->Valid()) {
    iter->key();
    iter->value();
    iter->Next();
  }
  ASSERT_OK(iter->status());
  iter.reset();

  // Reopen with the SAME cache: previously cached blocks must still be found.
  const ImmutableCFOptions ioptions1(opt);
  ASSERT_OK(c.Reopen(ioptions1));
  auto table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  for (const std::string& key : keys) {
    ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
  }
  c.ResetTableReader();

  // rerun with different block cache
  // A fresh cache must, conversely, contain none of the table's blocks.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
  const ImmutableCFOptions ioptions2(opt);
  ASSERT_OK(c.Reopen(ioptions2));
  table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
  for (const std::string& key : keys) {
    ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
  }
  c.ResetTableReader();
}
|
|
|
|
|
2016-08-24 03:20:41 +02:00
|
|
|
TEST_F(BlockBasedTableTest, NewIndexIteratorLeak) {
  // Regression test for the data race described in
  // https://github.com/facebook/rocksdb/issues/1267
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  c.Add("a1", "val1");

  // Hash index + index blocks in a zero-capacity cache, so every
  // NewIndexIterator() call has to (re)load the index block.
  Options options;
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  table_options.cache_index_and_filter_blocks = true;
  table_options.block_cache = NewLRUCache(0);
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);

  // Force the two NewIndexIterator() calls to interleave in the order that
  // used to trigger the race.
  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
      {
          {"BlockBasedTable::NewIndexIterator::thread1:1",
           "BlockBasedTable::NewIndexIterator::thread2:2"},
          {"BlockBasedTable::NewIndexIterator::thread2:3",
           "BlockBasedTable::NewIndexIterator::thread1:4"},
      },
      {
          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
           "BlockBasedTable::NewIndexIterator::thread1:1"},
          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
           "BlockBasedTable::NewIndexIterator::thread1:4"},
          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
           "BlockBasedTable::NewIndexIterator::thread2:2"},
          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
           "BlockBasedTable::NewIndexIterator::thread2:3"},
      });

  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  ReadOptions ro;
  auto* reader = c.GetTableReader();

  // Thread 1: create an iterator and seek; thread 2: just create one. The
  // sync points above interleave their index-block loads.
  auto thread1 = port::Thread([&]() {
    TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker");
    std::unique_ptr<InternalIterator> it(reader->NewIterator(ro));
    it->Seek(InternalKey("a1", 0, kTypeValue).Encode());
  });
  auto thread2 = port::Thread([&]() {
    TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker");
    std::unique_ptr<InternalIterator> it(reader->NewIterator(ro));
  });
  thread1.join();
  thread2.join();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  c.ResetTableReader();
}
|
|
|
|
|
2015-07-20 20:09:14 +02:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(PlainTableTest, BasicPlainTableProperties) {
  // Builds a 26-entry PlainTable in memory and verifies the table
  // properties that ReadTableProperties() recovers from it.
  PlainTableOptions plain_table_options;
  plain_table_options.user_key_len = 8;
  plain_table_options.bloom_bits_per_key = 8;
  plain_table_options.hash_table_ratio = 0;

  PlainTableFactory factory(plain_table_options);
  // (Removed an unused local `test::StringSink sink;` — the builder writes
  // into the sink owned by file_writer below.)
  unique_ptr<WritableFileWriter> file_writer(
      test::GetWritableFileWriter(new test::StringSink()));
  Options options;
  const ImmutableCFOptions ioptions(options);
  InternalKeyComparator ikc(options.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
      int_tbl_prop_collector_factories;
  std::string column_family_name;
  int unknown_level = -1;
  std::unique_ptr<TableBuilder> builder(factory.NewTableBuilder(
      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                          kNoCompression, CompressionOptions(),
                          nullptr /* compression_dict */,
                          false /* skip_filters */, column_family_name,
                          unknown_level),
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      file_writer.get()));

  for (char c = 'a'; c <= 'z'; ++c) {
    // 8-byte user key plus an 8-byte internal-key footer (type byte followed
    // by seven spaces) -> 16 bytes per key, matching the raw_key_size
    // assertion below.
    std::string key(8, c);
    key.append("\1       ");  // PlainTable expects internal key structure
    std::string value(28, c + 42);
    builder->Add(key, value);
  }
  ASSERT_OK(builder->Finish());
  file_writer->Flush();

  test::StringSink* ss =
      static_cast<test::StringSink*>(file_writer->writable_file());
  unique_ptr<RandomAccessFileReader> file_reader(
      test::GetRandomAccessFileReader(
          new test::StringSource(ss->contents(), 72242, true)));

  TableProperties* props = nullptr;
  auto s = ReadTableProperties(file_reader.get(), ss->contents().size(),
                               kPlainTableMagicNumber, ioptions, &props);
  std::unique_ptr<TableProperties> props_guard(props);
  ASSERT_OK(s);

  // PlainTable stores no separate index/filter blocks; everything else
  // reflects the 26 fixed-size entries written above.
  ASSERT_EQ(0ul, props->index_size);
  ASSERT_EQ(0ul, props->filter_size);
  ASSERT_EQ(16ul * 26, props->raw_key_size);
  ASSERT_EQ(28ul * 26, props->raw_value_size);
  ASSERT_EQ(26ul, props->num_entries);
  ASSERT_EQ(1ul, props->num_data_blocks);
}
|
2015-07-20 20:09:14 +02:00
|
|
|
#endif // !ROCKSDB_LITE
|
2014-01-24 21:14:08 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) {
  // Exercises ApproximateOffsetOf() on an uncompressed block-based table.
  TableConstructor tc(BytewiseComparator(),
                      true /* convert_to_internal_key_ */);
  tc.Add("k01", "hello");
  tc.Add("k02", "hello2");
  tc.Add("k03", std::string(10000, 'x'));
  tc.Add("k04", std::string(200000, 'x'));
  tc.Add("k05", std::string(300000, 'x'));
  tc.Add("k06", "hello3");
  tc.Add("k07", std::string(100000, 'x'));

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  test::PlainInternalKeyComparator int_cmp(options.comparator);
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  const ImmutableCFOptions ioptions(options);
  tc.Finish(options, ioptions, table_options, int_cmp, &keys, &kvmap);

  // Offsets are approximate: each expected value is a [low, high] window.
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k03"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k04"), 10000, 11000));
  // k04 and k05 will be in two consecutive blocks, the index is
  // an arbitrary slice between k04 and k05, either before or after k04a
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k04a"), 10000, 211000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k07"), 510000, 511000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("xyz"), 610000, 612000));
  tc.ResetTableReader();
}
|
|
|
|
|
2014-01-24 20:09:04 +01:00
|
|
|
// Builds a small table with the given compression type and checks that
// approximate offsets reflect the compressed (not raw) data size.
static void DoCompressionTest(CompressionType comp) {
  Random rnd(301);
  TableConstructor tc(BytewiseComparator(),
                      true /* convert_to_internal_key_ */);
  std::string tmp;
  tc.Add("k01", "hello");
  tc.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  tc.Add("k03", "hello3");
  tc.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  test::PlainInternalKeyComparator key_cmp(options.comparator);
  options.compression = comp;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  const ImmutableCFOptions ioptions(options);
  tc.Finish(options, ioptions, table_options, key_cmp, &keys, &kvmap);

  // Each 10000-byte compressible value should shrink to roughly a quarter,
  // so offsets land well below the raw sizes.
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k03"), 2000, 3000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("k04"), 2000, 3000));
  ASSERT_TRUE(Between(tc.ApproximateOffsetOf("xyz"), 4000, 6100));
  tc.ResetTableReader();
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
  // Collects every compression type supported by this build, then runs
  // DoCompressionTest() once per type. Unsupported codecs are reported to
  // stderr and skipped.
  std::vector<CompressionType> compression_state;
  if (!Snappy_Supported()) {
    fprintf(stderr, "skipping snappy compression tests\n");
  } else {
    compression_state.push_back(kSnappyCompression);
  }

  if (!Zlib_Supported()) {
    fprintf(stderr, "skipping zlib compression tests\n");
  } else {
    compression_state.push_back(kZlibCompression);
  }

  // TODO(kailiu) DoCompressionTest() doesn't work with BZip2.
  /*
  if (!BZip2_Supported()) {
    fprintf(stderr, "skipping bzip2 compression tests\n");
  } else {
    compression_state.push_back(kBZip2Compression);
  }
  */

  if (!LZ4_Supported()) {
    fprintf(stderr, "skipping lz4 and lz4hc compression tests\n");
  } else {
    compression_state.push_back(kLZ4Compression);
    compression_state.push_back(kLZ4HCCompression);
  }

  if (!XPRESS_Supported()) {
    // Fixed message: it previously read "xpress and xpress", a copy/paste
    // slip from the lz4/lz4hc branch above.
    fprintf(stderr, "skipping xpress compression tests\n");
  } else {
    compression_state.push_back(kXpressCompression);
  }

  for (auto state : compression_state) {
    DoCompressionTest(state);
  }
}
|
|
|
|
|
2018-02-06 22:54:53 +01:00
|
|
|
// RandomizedHarnessTest is very slow for certain combinations of arguments
|
|
|
|
// Split into 8 pieces to reduce the time individual tests take.
|
2018-05-05 00:14:54 +02:00
|
|
|
TEST_F(HarnessTest, Randomized1) {
  // Slice 1 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 1, /* total */ 8);
}
|
|
|
|
|
2018-05-05 00:14:54 +02:00
|
|
|
TEST_F(HarnessTest, Randomized2) {
  // Slice 2 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 2, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized3) {
  // Slice 3 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 3, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized4) {
  // Slice 4 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 4, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized5) {
  // Slice 5 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 5, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized6) {
  // Slice 6 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 6, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized7) {
  // Slice 7 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 7, /* total */ 8);
}
|
|
|
|
|
|
|
|
TEST_F(HarnessTest, Randomized8) {
  // Slice 8 of 8 of the randomized harness run.
  RandomizedHarnessTest(/* part */ 8, /* total */ 8);
}
|
|
|
|
|
2015-10-13 19:32:05 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(HarnessTest, RandomizedLongDB) {
  // Fills a real DB with ~100k random entries, runs the harness checks, and
  // then verifies that flushes/compactions produced at least one file.
  Random rnd(test::RandomSeed());
  TestArgs args = {DB_TEST, false, 16, kNoCompression, 0, false};
  Init(args);
  const int kNumEntries = 100000;
  for (int i = 0; i < kNumEntries; i++) {
    std::string v;
    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
  }
  Test(&rnd);

  // We must have created enough data to force merging
  int files = 0;
  for (int level = 0; level < db()->NumberLevels(); level++) {
    char prop_name[100];
    snprintf(prop_name, sizeof(prop_name), "rocksdb.num-files-at-level%d",
             level);
    std::string value;
    ASSERT_TRUE(db()->GetProperty(prop_name, &value));
    files += atoi(value.c_str());
  }
  ASSERT_GT(files, 0);
}
|
2015-10-13 19:32:05 +02:00
|
|
|
#endif // ROCKSDB_LITE
|
2013-11-10 10:17:32 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Empty gtest fixture grouping the MemTable unit tests below.
class MemTableTest : public testing::Test {};
|
2013-11-10 10:17:32 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
TEST_F(MemTableTest, Simple) {
  // Inserts a few puts and range deletions through a WriteBatch, then dumps
  // both the point-key iterator (pass 0) and the range-tombstone iterator
  // (pass 1) to stderr.
  InternalKeyComparator cmp(BytewiseComparator());
  auto table_factory = std::make_shared<SkipListFactory>();
  Options options;
  options.memtable_factory = table_factory;
  ImmutableCFOptions ioptions(options);
  WriteBufferManager wb(options.db_write_buffer_size);
  MemTable* memtable =
      new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
                   kMaxSequenceNumber, 0 /* column_family_id */);
  memtable->Ref();

  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  batch.DeleteRange(std::string("chi"), std::string("xigua"));
  batch.DeleteRange(std::string("begin"), std::string("end"));
  ColumnFamilyMemTablesDefault cf_mems_default(memtable);
  ASSERT_TRUE(
      WriteBatchInternal::InsertInto(&batch, &cf_mems_default, nullptr).ok());

  for (int pass = 0; pass != 2; ++pass) {
    Arena arena;
    ScopedArenaIterator arena_iter_guard;
    std::unique_ptr<InternalIterator> iter_guard;
    InternalIterator* it;
    if (pass == 0) {
      // Point-key iterator is arena-allocated, so it needs the arena guard.
      it = memtable->NewIterator(ReadOptions(), &arena);
      arena_iter_guard.set(it);
    } else {
      // Range-tombstone iterator is heap-allocated (and may be null).
      it = memtable->NewRangeTombstoneIterator(ReadOptions());
      iter_guard.reset(it);
    }
    if (it == nullptr) {
      continue;
    }
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
      fprintf(stderr, "key: '%s' -> '%s'\n", it->key().ToString().c_str(),
              it->value().ToString().c_str());
    }
  }

  delete memtable->Unref();
}
|
|
|
|
|
2013-12-06 01:51:26 +01:00
|
|
|
// Test the empty key
|
2015-03-17 22:08:00 +01:00
|
|
|
// Test the empty key against every harness configuration.
TEST_F(HarnessTest, SimpleEmptyKey) {
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Single-entry table against every harness configuration.
TEST_F(HarnessTest, SimpleSingle) {
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// A few overlapping-prefix keys against every harness configuration.
TEST_F(HarnessTest, SimpleMulti) {
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}
|
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// A key of 0xff bytes against every harness configuration.
TEST_F(HarnessTest, SimpleSpecialKey) {
  for (const auto& arg : GenerateArgList()) {
    Init(arg);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
|
2013-11-10 10:17:32 +01:00
|
|
|
|
2015-03-17 22:08:00 +01:00
|
|
|
// Round-trip encode/decode tests for table footers: legacy footers must be
// up-converted to the current format on decode, and version/checksum fields
// must survive a round trip for both block-based and plain tables.
TEST_F(HarnessTest, FooterTests) {
  {
    // upconvert legacy block based
    std::string encoded;
    Footer footer(kLegacyBlockBasedTableMagicNumber, 0);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    // Decoding replaces the legacy magic number with the current one.
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    // Checksum type defaults to CRC32c when none was explicitly set.
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 0U);
  }
  {
    // xxhash block based
    std::string encoded;
    Footer footer(kBlockBasedTableMagicNumber, 1);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    // Non-default checksum type must round-trip through encode/decode.
    footer.set_checksum(kxxHash);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 1U);
  }
// Plain table is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
  {
    // upconvert legacy plain table
    std::string encoded;
    Footer footer(kLegacyPlainTableMagicNumber, 0);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    // Legacy plain-table magic is likewise up-converted on decode.
    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 0U);
  }
  {
    // xxhash plain table (comment previously said "block based" — the
    // magic number below is the plain-table one)
    std::string encoded;
    Footer footer(kPlainTableMagicNumber, 1);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.set_checksum(kxxHash);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 1U);
  }
#endif  // !ROCKSDB_LITE
  {
    // version == 2
    std::string encoded;
    Footer footer(kBlockBasedTableMagicNumber, 2);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 2U);
  }
}
|
|
|
|
|
2016-02-05 19:22:37 +01:00
|
|
|
class IndexBlockRestartIntervalTest
|
|
|
|
: public BlockBasedTableTest,
|
|
|
|
public ::testing::WithParamInterface<int> {
|
|
|
|
public:
|
|
|
|
static std::vector<int> GetRestartValues() { return {-1, 0, 1, 8, 16, 32}; }
|
|
|
|
};
|
|
|
|
|
|
|
|
// Instantiate IndexBlockRestartIntervalTest once per restart-interval value
// returned by GetRestartValues().
INSTANTIATE_TEST_CASE_P(
    IndexBlockRestartIntervalTest, IndexBlockRestartIntervalTest,
    ::testing::ValuesIn(IndexBlockRestartIntervalTest::GetRestartValues()));
|
|
|
|
|
|
|
|
// Builds a table of many random keys with a tiny data-block size (so the
// index block gets large), then verifies both point lookups and a full
// forward scan for the parameterized index_block_restart_interval.
TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) {
  const int kKeysInTable = 10000;
  const int kKeySize = 100;
  const int kValSize = 500;

  int index_block_restart_interval = GetParam();

  BlockBasedTableOptions table_options;
  table_options.block_size = 64;  // small block size to get big index block
  table_options.index_block_restart_interval = index_block_restart_interval;
  Options options;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator());
  // NOTE: `static` carries the RNG state across parameter instantiations,
  // so each instantiation operates on a different random key set.
  static Random rnd(301);
  for (int i = 0; i < kKeysInTable; i++) {
    InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue);
    c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize));
  }

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  std::unique_ptr<InternalKeyComparator> comparator(
      new InternalKeyComparator(BytewiseComparator()));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
  auto reader = c.GetTableReader();

  std::unique_ptr<InternalIterator> iter(reader->NewIterator(ReadOptions()));

  // Point lookups: every stored key must be found with its exact value.
  for (const auto& entry : kvmap) {
    iter->Seek(entry.first);

    ASSERT_TRUE(iter->Valid());
    ASSERT_OK(iter->status());
    ASSERT_EQ(iter->key(), entry.first);
    ASSERT_EQ(iter->value(), entry.second);
  }

  // Full forward scan: order and contents must match the sorted kvmap.
  auto expected = kvmap.begin();
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ASSERT_EQ(iter->key(), expected->first);
    ASSERT_EQ(iter->value(), expected->second);
    ++expected;
  }
  ASSERT_EQ(expected, kvmap.end());
  c.ResetTableReader();
}
|
|
|
|
|
2016-02-23 01:33:26 +01:00
|
|
|
class PrefixTest : public testing::Test {
|
|
|
|
public:
|
|
|
|
PrefixTest() : testing::Test() {}
|
|
|
|
~PrefixTest() {}
|
|
|
|
};
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// A simple PrefixExtractor that only works for test PrefixAndWholeKeyTest
|
|
|
|
class TestPrefixExtractor : public rocksdb::SliceTransform {
|
|
|
|
public:
|
|
|
|
~TestPrefixExtractor() override{};
|
|
|
|
const char* Name() const override { return "TestPrefixExtractor"; }
|
|
|
|
|
|
|
|
rocksdb::Slice Transform(const rocksdb::Slice& src) const override {
|
|
|
|
assert(IsValid(src));
|
|
|
|
return rocksdb::Slice(src.data(), 3);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool InDomain(const rocksdb::Slice& src) const override {
|
|
|
|
assert(IsValid(src));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
bool InRange(const rocksdb::Slice& /*dst*/) const override { return true; }
|
2016-02-23 01:33:26 +01:00
|
|
|
|
|
|
|
bool IsValid(const rocksdb::Slice& src) const {
|
|
|
|
if (src.size() != 4) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (src[0] != '[') {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (src[1] < '0' || src[1] > '9') {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (src[2] != ']') {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (src[3] < '0' || src[3] > '9') {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Exercises a block-based table with both a prefix extractor and whole-key
// bloom filtering enabled: writes keys for ten distinct "[d]" prefixes,
// forces a compaction, and relies on TestPrefixExtractor's asserts to catch
// any malformed key handed to the filter path.
TEST_F(PrefixTest, PrefixAndWholeKeyTest) {
  rocksdb::Options options;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  options.num_levels = 20;
  options.create_if_missing = true;
  options.optimize_filters_for_hits = false;
  options.target_file_size_base = 268435456;
  options.prefix_extractor = std::make_shared<TestPrefixExtractor>();
  rocksdb::BlockBasedTableOptions bbto;
  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  bbto.block_size = 262144;
  // Prefix bloom AND whole-key bloom are deliberately enabled together.
  bbto.whole_key_filtering = true;

  const std::string kDBPath = test::TmpDir() + "/table_prefix_test";
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  DestroyDB(kDBPath, options);
  rocksdb::DB* db;
  ASSERT_OK(rocksdb::DB::Open(options, kDBPath, &db));

  // Create a bunch of keys with 10 filters.
  for (int i = 0; i < 10; i++) {
    std::string prefix = "[" + std::to_string(i) + "]";
    for (int j = 0; j < 10; j++) {
      std::string key = prefix + std::to_string(j);
      // Previously the Put status was silently dropped; a failed write would
      // have made the rest of the test meaningless.
      ASSERT_OK(db->Put(rocksdb::WriteOptions(), key, "1"));
    }
  }

  // Trigger compaction; check the status instead of ignoring it.
  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  delete db;
  // In the second round, turn whole_key_filtering off and expect
  // rocksdb still works.
  // NOTE(review): the "second round" described above is not implemented in
  // this test — confirm whether it was intentionally dropped.
}
|
|
|
|
|
2016-10-19 01:59:37 +02:00
|
|
|
// Builds an SST file that carries the external-SST-file "global sequence
// number" property, then patches that property in place (via a RandomRW sink)
// and verifies that the table reader exposes every entry at the patched
// sequence number, both for full scans and for Seek().
TEST_F(BlockBasedTableTest, TableWithGlobalSeqno) {
  BlockBasedTableOptions bbto;
  test::StringSink* sink = new test::StringSink();
  unique_ptr<WritableFileWriter> file_writer(test::GetWritableFileWriter(sink));
  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  const ImmutableCFOptions ioptions(options);
  InternalKeyComparator ikc(options.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
      int_tbl_prop_collector_factories;
  // Collector that writes the version/global_seqno user properties into the
  // file, exactly like SstFileWriter does for ingestible files.
  int_tbl_prop_collector_factories.emplace_back(
      new SstFileWriterPropertiesCollectorFactory(2 /* version */,
                                                  0 /* global_seqno*/));
  std::string column_family_name;
  std::unique_ptr<TableBuilder> builder(options.table_factory->NewTableBuilder(
      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                          kNoCompression, CompressionOptions(),
                          nullptr /* compression_dict */,
                          false /* skip_filters */, column_family_name, -1),
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      file_writer.get()));

  // 26 entries "aaaaaaaa".."zzzzzzzz", each written at seqno 0 with
  // value == user key.
  for (char c = 'a'; c <= 'z'; ++c) {
    std::string key(8, c);
    std::string value = key;
    InternalKey ik(key, 0, kTypeValue);

    builder->Add(ik.Encode(), value);
  }
  ASSERT_OK(builder->Finish());
  file_writer->Flush();

  test::RandomRWStringSink ss_rw(sink);
  uint32_t version;
  uint64_t global_seqno;
  uint64_t global_seqno_offset;

  // Helper function to get version, global_seqno, global_seqno_offset
  // by re-reading the table properties from the (possibly patched) file.
  std::function<void()> GetVersionAndGlobalSeqno = [&]() {
    // 73342 is presumably a unique file id for StringSource, not a size —
    // TODO(review): confirm against test::StringSource's constructor.
    unique_ptr<RandomAccessFileReader> file_reader(
        test::GetRandomAccessFileReader(
            new test::StringSource(ss_rw.contents(), 73342, true)));

    TableProperties* props = nullptr;
    ASSERT_OK(ReadTableProperties(file_reader.get(), ss_rw.contents().size(),
                                  kBlockBasedTableMagicNumber, ioptions,
                                  &props));

    UserCollectedProperties user_props = props->user_collected_properties;
    version = DecodeFixed32(
        user_props[ExternalSstFilePropertyNames::kVersion].c_str());
    global_seqno = DecodeFixed64(
        user_props[ExternalSstFilePropertyNames::kGlobalSeqno].c_str());
    // Byte offset of the stored global_seqno value, used by SetGlobalSeqno
    // below to overwrite it in place.
    global_seqno_offset =
        props->properties_offsets[ExternalSstFilePropertyNames::kGlobalSeqno];

    delete props;
  };

  // Helper function to update the value of the global seqno in the file
  // by rewriting the 8 bytes at global_seqno_offset.
  std::function<void(uint64_t)> SetGlobalSeqno = [&](uint64_t val) {
    std::string new_global_seqno;
    PutFixed64(&new_global_seqno, val);

    ASSERT_OK(ss_rw.Write(global_seqno_offset, new_global_seqno));
  };

  // Helper function to get the contents of the table InternalIterator.
  // NOTE(review): the Status returned by NewTableReader is ignored here; a
  // failed open would surface as a crash on the null reader instead of a
  // clean assertion.
  unique_ptr<TableReader> table_reader;
  std::function<InternalIterator*()> GetTableInternalIter = [&]() {
    unique_ptr<RandomAccessFileReader> file_reader(
        test::GetRandomAccessFileReader(
            new test::StringSource(ss_rw.contents(), 73342, true)));

    options.table_factory->NewTableReader(
        TableReaderOptions(ioptions, EnvOptions(), ikc), std::move(file_reader),
        ss_rw.contents().size(), &table_reader);

    return table_reader->NewIterator(ReadOptions());
  };

  // Freshly written file: version 2, global seqno still 0.
  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(0, global_seqno);

  // Scan: every entry is exposed at seqno 0 with value == user key.
  InternalIterator* iter = GetTableInternalIter();
  char current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 0);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);
  delete iter;

  // Update global sequence number to 10
  SetGlobalSeqno(10);
  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(10, global_seqno);

  // Scan again: the same entries are now exposed at seqno 10.
  iter = GetTableInternalIter();
  current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 10);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);

  // Verify Seek: seeking at exactly the global seqno finds each key.
  for (char c = 'a'; c <= 'z'; c++) {
    std::string k = std::string(8, c);
    InternalKey ik(k, 10, kValueTypeForSeek);
    iter->Seek(ik.Encode());
    ASSERT_TRUE(iter->Valid());

    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 10);
    ASSERT_EQ(pik.user_key.ToString(), k);
    ASSERT_EQ(iter->value().ToString(), k);
  }
  delete iter;

  // Update global sequence number to 3
  SetGlobalSeqno(3);
  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(3, global_seqno);

  iter = GetTableInternalIter();
  current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 3);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);

  // Verify Seek
  for (char c = 'a'; c <= 'z'; c++) {
    std::string k = std::string(8, c);
    // Seek snapshot seqno 4 is greater than the global seqno 3, so the
    // entry (exposed at seqno 3) is visible and we still get our key.
    // (The original comment had the comparison backwards.)
    InternalKey ik(k, 4, kValueTypeForSeek);
    iter->Seek(ik.Encode());
    ASSERT_TRUE(iter->Valid());

    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 3);
    ASSERT_EQ(pik.user_key.ToString(), k);
    ASSERT_EQ(iter->value().ToString(), k);
  }

  delete iter;
}
|
|
|
|
|
2018-03-27 05:14:24 +02:00
|
|
|
// Writes a table with block_align=true and verifies that every data block
// occupies exactly one 4KB-aligned unit, then re-opens the file with
// block_align=false and checks all keys read back correctly — i.e. the two
// modes are interchangeable on the read path.
TEST_F(BlockBasedTableTest, BlockAlignTest) {
  BlockBasedTableOptions bbto;
  bbto.block_align = true;
  test::StringSink* sink = new test::StringSink();
  unique_ptr<WritableFileWriter> file_writer(test::GetWritableFileWriter(sink));
  Options options;
  // Alignment requires uncompressed blocks (see BadOptions below).
  options.compression = kNoCompression;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  const ImmutableCFOptions ioptions(options);
  InternalKeyComparator ikc(options.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
      int_tbl_prop_collector_factories;
  std::string column_family_name;
  std::unique_ptr<TableBuilder> builder(options.table_factory->NewTableBuilder(
      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                          kNoCompression, CompressionOptions(),
                          nullptr /* compression_dict */,
                          false /* skip_filters */, column_family_name, -1),
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      file_writer.get()));

  // Keys "00001".."10000" (zero-padded, so byte order == numeric order).
  for (int i = 1; i <= 10000; ++i) {
    std::ostringstream ostr;
    ostr << std::setfill('0') << std::setw(5) << i;
    std::string key = ostr.str();
    std::string value = "val";
    InternalKey ik(key, 0, kTypeValue);

    builder->Add(ik.Encode(), value);
  }
  ASSERT_OK(builder->Finish());
  file_writer->Flush();

  test::RandomRWStringSink ss_rw(sink);
  // 73342 is presumably a unique file id for StringSource, not a size —
  // TODO(review): confirm against test::StringSource's constructor.
  unique_ptr<RandomAccessFileReader> file_reader(
      test::GetRandomAccessFileReader(
          new test::StringSource(ss_rw.contents(), 73342, true)));

  // Verifies via the table properties that each data block is exactly 4KB:
  // the data section divides evenly into num_data_blocks aligned blocks.
  // (The original comment here was copy-pasted from the global-seqno test.)
  std::function<void()> VerifyBlockAlignment = [&]() {
    TableProperties* props = nullptr;
    ASSERT_OK(ReadTableProperties(file_reader.get(), ss_rw.contents().size(),
                                  kBlockBasedTableMagicNumber, ioptions,
                                  &props));

    uint64_t data_block_size = props->data_size / props->num_data_blocks;
    ASSERT_EQ(data_block_size, 4096);
    ASSERT_EQ(props->data_size, data_block_size * props->num_data_blocks);
    delete props;
  };

  VerifyBlockAlignment();

  // The below block of code verifies that we can read back the keys. Set
  // block_align to false when creating the reader to ensure we can flip between
  // the two modes without any issues
  std::unique_ptr<TableReader> table_reader;
  bbto.block_align = false;
  Options options2;
  options2.table_factory.reset(NewBlockBasedTableFactory(bbto));
  ImmutableCFOptions ioptions2(options2);
  ASSERT_OK(ioptions.table_factory->NewTableReader(
      TableReaderOptions(ioptions2, EnvOptions(),
                         GetPlainInternalComparator(options2.comparator)),
      std::move(file_reader), ss_rw.contents().size(), &table_reader));

  std::unique_ptr<InternalIterator> db_iter(
      table_reader->NewIterator(ReadOptions()));

  // Full scan: keys must come back in order "00001".."10000" with value
  // "val" each.
  int expected_key = 1;
  for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
    std::ostringstream ostr;
    ostr << std::setfill('0') << std::setw(5) << expected_key++;
    std::string key = ostr.str();
    std::string value = "val";

    ASSERT_OK(db_iter->status());
    ASSERT_EQ(ExtractUserKey(db_iter->key()).ToString(), key);
    ASSERT_EQ(db_iter->value().ToString(), value);
  }
  // The loop post-increments once past the last key; undo before comparing.
  expected_key--;
  ASSERT_EQ(expected_key, 10000);
  table_reader.reset();
}
|
|
|
|
|
|
|
|
// Option combinations that must make DB::Open fail when block_align is on:
// first a non-4KB-multiple block size, then compression enabled. (Exact
// rejection reasons live in the table factory's option sanitization —
// confirm there; this test only pins that Open returns non-OK.)
TEST_F(BlockBasedTableTest, BadOptions) {
  rocksdb::BlockBasedTableOptions bbto;
  bbto.block_size = 4000;
  bbto.block_align = true;

  rocksdb::Options options;
  options.compression = kNoCompression;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));

  const std::string db_path = test::TmpDir() + "/table_prefix_test";
  DestroyDB(db_path, options);
  rocksdb::DB* db;
  ASSERT_NOK(rocksdb::DB::Open(options, db_path, &db));

  // Fixing the block size alone is not enough: block_align combined with
  // compression must still be rejected.
  bbto.block_size = 4096;
  options.compression = kSnappyCompression;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  ASSERT_NOK(rocksdb::DB::Open(options, db_path, &db));
}
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Standard gtest entry point: parse gtest flags, then run every registered
// table test.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|