// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//

#include <stdio.h>

#include <algorithm>
#include <cmath>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/write_batch_internal.h"
#include "db/memtable.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}

std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
                        Random *rnd) {
  char buf[50];
  char *p = &buf[0];
  snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key);
  std::string k(p);
  if (padding_size) {
    k += RandomString(rnd, padding_size);
  }

  return k;
}
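
// A note on the key layout above: "%6d%4d" space-pads each integer, so
// GenerateKey(1, 2, /*padding_size=*/0, nullptr) yields "     1   2"
// (a fixed 10-byte prefix); any padding is appended as random bytes.
// Since ' ' sorts before the digits, keys with increasing primary_key
// compare in ascending byte order, which is what BlockBuilder::Add()
// expects.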

// Generate random key-value pairs.
// The generated keys will be sorted. You can tune the parameters to generate
// different kinds of test key/value pairs for different scenarios.
void GenerateRandomKVs(std::vector<std::string> *keys,
                       std::vector<std::string> *values, const int from,
                       const int len, const int step = 1,
                       const int padding_size = 0,
                       const int keys_share_prefix = 1) {
  Random rnd(302);

  // generate different prefix
  for (int i = from; i < from + len; i += step) {
    // generate keys that share the prefix
    for (int j = 0; j < keys_share_prefix; ++j) {
      keys->emplace_back(GenerateKey(i, j, padding_size, &rnd));

      // 100 byte values
      values->emplace_back(RandomString(&rnd, 100));
    }
  }
}

// Same as GenerateRandomKVs but the values are BlockHandles.
void GenerateRandomKBHs(std::vector<std::string> *keys,
                        std::vector<BlockHandle> *values, const int from,
                        const int len, const int step = 1,
                        const int padding_size = 0,
                        const int keys_share_prefix = 1) {
  Random rnd(302);
  uint64_t offset = 0;

  // generate different prefix
  for (int i = from; i < from + len; i += step) {
    // generate keys that share the prefix
    for (int j = 0; j < keys_share_prefix; ++j) {
      keys->emplace_back(GenerateKey(i, j, padding_size, &rnd));

      uint64_t size = rnd.Uniform(1024 * 16);
      BlockHandle handle(offset, size);
      offset += size + kBlockTrailerSize;
      values->emplace_back(handle);
    }
  }
}
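
// The handles above emulate a plausible index: each BlockHandle starts where
// the previous block ended, skipping kBlockTrailerSize bytes for the block
// trailer (compression type byte plus checksum), so consecutive handles form
// a contiguous chain of offsets.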

class BlockTest : public testing::Test {};

// block test
TEST_F(BlockTest, SimpleTest) {
  Random rnd(301);
  Options options = Options();
  std::unique_ptr<InternalKeyComparator> ic;
  ic.reset(new test::PlainInternalKeyComparator(options.comparator));

  std::vector<std::string> keys;
  std::vector<std::string> values;
  BlockBuilder builder(16);
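  // Restart interval 16: the builder writes a full (uncompressed) key every
  // 16 entries and prefix-compresses the keys in between, the typical
  // data-block configuration.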
  int num_records = 100000;

  GenerateRandomKVs(&keys, &values, 0, num_records);
  // add a bunch of records to a block
  for (int i = 0; i < num_records; i++) {
    builder.Add(keys[i], values[i]);
  }

  // read serialized contents of the block
  Slice rawblock = builder.Finish();

  // create block reader
  BlockContents contents;
  contents.data = rawblock;
  contents.cachable = false;
  Block reader(std::move(contents), kDisableGlobalSequenceNumber);
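  // kDisableGlobalSequenceNumber: the block is read back as-is, with no
  // global sequence number applied to its keys (as there would be for an
  // ingested external file).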

  // read contents of block sequentially
  int count = 0;
  InternalIterator *iter =
      reader.NewIterator<DataBlockIter>(options.comparator, options.comparator);
  for (iter->SeekToFirst(); iter->Valid(); count++, iter->Next()) {
    // read kv from block
    Slice k = iter->key();
    Slice v = iter->value();

    // compare with lookaside array
    ASSERT_EQ(k.ToString().compare(keys[count]), 0);
    ASSERT_EQ(v.ToString().compare(values[count]), 0);
  }
  delete iter;

  // read block contents randomly
  iter =
      reader.NewIterator<DataBlockIter>(options.comparator, options.comparator);
  for (int i = 0; i < num_records; i++) {
    // find a random key in the lookaside array
    int index = rnd.Uniform(num_records);
    Slice k(keys[index]);

    // search in block for this key
    iter->Seek(k);
    ASSERT_TRUE(iter->Valid());
    Slice v = iter->value();
    ASSERT_EQ(v.ToString().compare(values[index]), 0);
  }
  delete iter;
}

TEST_F(BlockTest, ValueDeltaEncodingTest) {
  Random rnd(301);
  Options options = Options();
  std::unique_ptr<InternalKeyComparator> ic;
  ic.reset(new test::PlainInternalKeyComparator(options.comparator));

  std::vector<std::string> keys;
  std::vector<BlockHandle> values;
  const bool kUseDeltaEncoding = true;
  const bool kUseValueDeltaEncoding = true;
  BlockBuilder builder(16, kUseDeltaEncoding, kUseValueDeltaEncoding);
  int num_records = 100;

  GenerateRandomKBHs(&keys, &values, 0, num_records);
  // add a bunch of records to a block
  BlockHandle last_encoded_handle;
  for (int i = 0; i < num_records; i++) {
    auto block_handle = values[i];
    std::string handle_encoding;
    block_handle.EncodeTo(&handle_encoding);
    std::string handle_delta_encoding;
    PutVarsignedint64(&handle_delta_encoding,
                      block_handle.size() - last_encoded_handle.size());
    last_encoded_handle = block_handle;
    const Slice handle_delta_encoding_slice(handle_delta_encoding);
    builder.Add(keys[i], handle_encoding, &handle_delta_encoding_slice);
  }
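
  // With value delta encoding, only the signed size delta between consecutive
  // handles is stored for most entries; the offset is recoverable as the
  // previous handle's offset + size + kBlockTrailerSize, which matches the
  // layout GenerateRandomKBHs produced above.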

  // read serialized contents of the block
  Slice rawblock = builder.Finish();

  // create block reader
  BlockContents contents;
  contents.data = rawblock;
  contents.cachable = false;
  Block reader(std::move(contents), kDisableGlobalSequenceNumber);

  const bool kTotalOrderSeek = true;
  const bool kIncludesSeq = true;
  const bool kValueIsFull = !kUseValueDeltaEncoding;
  IndexBlockIter *kNullIter = nullptr;
  Statistics *kNullStats = nullptr;
  // read contents of block sequentially
  int count = 0;
  InternalIteratorBase<BlockHandle> *iter = reader.NewIterator<IndexBlockIter>(
      options.comparator, options.comparator, kNullIter, kNullStats,
      kTotalOrderSeek, kIncludesSeq, kValueIsFull);
  for (iter->SeekToFirst(); iter->Valid(); count++, iter->Next()) {
    // read kv from block
    Slice k = iter->key();
    BlockHandle handle = iter->value();

    // compare with lookaside array
    ASSERT_EQ(k.ToString().compare(keys[count]), 0);

    ASSERT_EQ(values[count].offset(), handle.offset());
    ASSERT_EQ(values[count].size(), handle.size());
  }
  delete iter;

  // read block contents randomly
  iter = reader.NewIterator<IndexBlockIter>(
      options.comparator, options.comparator, kNullIter, kNullStats,
      kTotalOrderSeek, kIncludesSeq, kValueIsFull);
  for (int i = 0; i < num_records; i++) {
    // find a random key in the lookaside array
    int index = rnd.Uniform(num_records);
    Slice k(keys[index]);

    // search in block for this key
    iter->Seek(k);
    ASSERT_TRUE(iter->Valid());
    BlockHandle handle = iter->value();
    ASSERT_EQ(values[index].offset(), handle.offset());
    ASSERT_EQ(values[index].size(), handle.size());
  }
  delete iter;
}

// return the block contents
BlockContents GetBlockContents(std::unique_ptr<BlockBuilder> *builder,
                               const std::vector<std::string> &keys,
                               const std::vector<std::string> &values,
                               const int /*prefix_group_size*/ = 1) {
  builder->reset(new BlockBuilder(1 /* restart interval */));

  // Add all of the keys
  for (size_t i = 0; i < keys.size(); ++i) {
    (*builder)->Add(keys[i], values[i]);
  }
  Slice rawblock = (*builder)->Finish();

  BlockContents contents;
  contents.data = rawblock;
  contents.cachable = false;

  return contents;
}

void CheckBlockContents(BlockContents contents, const int max_key,
                        const std::vector<std::string> &keys,
                        const std::vector<std::string> &values) {
  const size_t prefix_size = 6;
  // create block reader
  BlockContents contents_ref(contents.data, contents.cachable,
                             contents.compression_type);
  Block reader1(std::move(contents), kDisableGlobalSequenceNumber);
  Block reader2(std::move(contents_ref), kDisableGlobalSequenceNumber);

  std::unique_ptr<const SliceTransform> prefix_extractor(
      NewFixedPrefixTransform(prefix_size));

  std::unique_ptr<InternalIterator> regular_iter(
      reader2.NewIterator<DataBlockIter>(BytewiseComparator(),
                                         BytewiseComparator()));

  // Seek existent keys
  for (size_t i = 0; i < keys.size(); i++) {
    regular_iter->Seek(keys[i]);
    ASSERT_OK(regular_iter->status());
    ASSERT_TRUE(regular_iter->Valid());

    Slice v = regular_iter->value();
    ASSERT_EQ(v.ToString().compare(values[i]), 0);
  }

  // Seek non-existent keys.
  // For a hash index, if no key with a given prefix is found, the iterator
  // will simply be set as invalid; whereas a binary-search-based iterator
  // will return the one that is closest.
  for (int i = 1; i < max_key - 1; i += 2) {
    auto key = GenerateKey(i, 0, 0, nullptr);
    regular_iter->Seek(key);
    ASSERT_TRUE(regular_iter->Valid());
  }
}

// In this test case, no two keys share the same prefix.
TEST_F(BlockTest, SimpleIndexHash) {
  const int kMaxKey = 100000;
  std::vector<std::string> keys;
  std::vector<std::string> values;
  GenerateRandomKVs(&keys, &values, 0 /* first key id */,
                    kMaxKey /* last key id */, 2 /* step */,
                    8 /* padding size (8 bytes randomly generated suffix) */);

  std::unique_ptr<BlockBuilder> builder;
  auto contents = GetBlockContents(&builder, keys, values);

  CheckBlockContents(std::move(contents), kMaxKey, keys, values);
}

TEST_F(BlockTest, IndexHashWithSharedPrefix) {
  const int kMaxKey = 100000;
  // for each prefix, there will be 5 keys starting with it.
  const int kPrefixGroup = 5;
  std::vector<std::string> keys;
  std::vector<std::string> values;
  // Generate keys with shared prefixes.
  GenerateRandomKVs(&keys, &values, 0,  // first key id
                    kMaxKey,            // last key id
                    2,                  // step
                    10,                 // padding size,
                    kPrefixGroup);

  std::unique_ptr<BlockBuilder> builder;
  auto contents = GetBlockContents(&builder, keys, values, kPrefixGroup);

  CheckBlockContents(std::move(contents), kMaxKey, keys, values);
}

// A slow and accurate version of BlockReadAmpBitmap that simply stores
// all the marked ranges in a set.
class BlockReadAmpBitmapSlowAndAccurate {
 public:
  void Mark(size_t start_offset, size_t end_offset) {
    assert(end_offset >= start_offset);
    marked_ranges_.emplace(end_offset, start_offset);
  }

  void ResetCheckSequence() { iter_valid_ = false; }

  // Return true if the byte at `offset` was Marked.
  // This does a linear search from the previous position. When calling
  // multiple times, `offset` needs to be incremental to get correct results.
  // Call ResetCheckSequence() to reset it.
  bool IsPinMarked(size_t offset) {
    if (iter_valid_) {
      // Has existing iterator, try linear search from
      // the iterator.
      for (int i = 0; i < 64; i++) {
        if (offset < iter_->second) {
          return false;
        }
        if (offset <= iter_->first) {
          return true;
        }

        iter_++;
        if (iter_ == marked_ranges_.end()) {
          iter_valid_ = false;
          return false;
        }
      }
    }
    // Initial call or have linear searched too many times.
    // Do binary search.
    iter_ = marked_ranges_.lower_bound(
        std::make_pair(offset, static_cast<size_t>(0)));
    if (iter_ == marked_ranges_.end()) {
      iter_valid_ = false;
      return false;
    }
    iter_valid_ = true;
    return offset <= iter_->first && offset >= iter_->second;
  }

 private:
  std::set<std::pair<size_t, size_t>> marked_ranges_;
  std::set<std::pair<size_t, size_t>>::iterator iter_;
  bool iter_valid_ = false;
};
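
// Note on the set layout above: ranges are stored as (end_offset,
// start_offset) pairs, so lower_bound(make_pair(offset, 0)) finds the first
// range whose end is >= offset, i.e. the only candidate that could contain
// it. The real BlockReadAmpBitmap being checked against samples one byte per
// kBytesPerBit bucket at a random in-bucket offset (the "pin offset"), which
// the test below captures through the BlockReadAmpBitmap:rnd sync point.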

TEST_F(BlockTest, BlockReadAmpBitmap) {
  uint32_t pin_offset = 0;
  SyncPoint::GetInstance()->SetCallBack(
      "BlockReadAmpBitmap:rnd", [&pin_offset](void* arg) {
        pin_offset = *(static_cast<uint32_t*>(arg));
      });
  SyncPoint::GetInstance()->EnableProcessing();
  std::vector<size_t> block_sizes = {
      1,                // 1 byte
      32,               // 32 bytes
      61,               // 61 bytes
      64,               // 64 bytes
      512,              // 0.5 KB
      1024,             // 1 KB
      1024 * 4,         // 4 KB
      1024 * 10,        // 10 KB
      1024 * 50,        // 50 KB
      1024 * 1024 * 4,  // 4 MB
      777,
      124653,
  };
  const size_t kBytesPerBit = 64;

  Random rnd(301);
  for (size_t block_size : block_sizes) {
    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
    BlockReadAmpBitmap read_amp_bitmap(block_size, kBytesPerBit, stats.get());
    BlockReadAmpBitmapSlowAndAccurate read_amp_slow_and_accurate;

    size_t needed_bits = (block_size / kBytesPerBit);
    if (block_size % kBytesPerBit != 0) {
      needed_bits++;
    }
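    // i.e. needed_bits = ceil(block_size / kBytesPerBit): one bit per
    // kBytesPerBit-byte bucket, rounding up for a partial final bucket.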

    ASSERT_EQ(stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES), block_size);

    // Generate some random entries
    std::vector<size_t> random_entry_offsets;
    for (int i = 0; i < 1000; i++) {
      random_entry_offsets.push_back(rnd.Next() % block_size);
    }
    std::sort(random_entry_offsets.begin(), random_entry_offsets.end());
    auto it =
        std::unique(random_entry_offsets.begin(), random_entry_offsets.end());
    random_entry_offsets.resize(
        std::distance(random_entry_offsets.begin(), it));

    std::vector<std::pair<size_t, size_t>> random_entries;
    for (size_t i = 0; i < random_entry_offsets.size(); i++) {
      size_t entry_start = random_entry_offsets[i];
      size_t entry_end;
      if (i + 1 < random_entry_offsets.size()) {
        entry_end = random_entry_offsets[i + 1] - 1;
      } else {
        entry_end = block_size - 1;
      }
      random_entries.emplace_back(entry_start, entry_end);
    }

    for (size_t i = 0; i < random_entries.size(); i++) {
      read_amp_slow_and_accurate.ResetCheckSequence();
      auto &current_entry = random_entries[rnd.Next() % random_entries.size()];

      read_amp_bitmap.Mark(static_cast<uint32_t>(current_entry.first),
                           static_cast<uint32_t>(current_entry.second));
      read_amp_slow_and_accurate.Mark(current_entry.first,
                                      current_entry.second);

      size_t total_bits = 0;
      for (size_t bit_idx = 0; bit_idx < needed_bits; bit_idx++) {
        total_bits += read_amp_slow_and_accurate.IsPinMarked(
            bit_idx * kBytesPerBit + pin_offset);
      }
      size_t expected_estimate_useful = total_bits * kBytesPerBit;
      size_t got_estimate_useful =
          stats->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
      ASSERT_EQ(expected_estimate_useful, got_estimate_useful);
    }
  }
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
}

TEST_F(BlockTest, BlockWithReadAmpBitmap) {
  Random rnd(301);
  Options options = Options();
  std::unique_ptr<InternalKeyComparator> ic;
  ic.reset(new test::PlainInternalKeyComparator(options.comparator));

  std::vector<std::string> keys;
  std::vector<std::string> values;
  BlockBuilder builder(16);
  int num_records = 10000;

  GenerateRandomKVs(&keys, &values, 0, num_records, 1);
  // add a bunch of records to a block
  for (int i = 0; i < num_records; i++) {
    builder.Add(keys[i], values[i]);
  }

  Slice rawblock = builder.Finish();
  const size_t kBytesPerBit = 8;

  // Read the block sequentially using Next()
  {
    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();

    // create block reader
    BlockContents contents;
    contents.data = rawblock;
    contents.cachable = true;
    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
                 kBytesPerBit, stats.get());

    // read contents of block sequentially
    size_t read_bytes = 0;
    DataBlockIter *iter =
        static_cast<DataBlockIter *>(reader.NewIterator<DataBlockIter>(
            options.comparator, options.comparator, nullptr, stats.get()));
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      iter->value();
      read_bytes += iter->TEST_CurrentEntrySize();

      double semi_acc_read_amp =
          static_cast<double>(read_bytes) / rawblock.size();
      double read_amp = static_cast<double>(stats->getTickerCount(
                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);

      // Error in read amplification will be less than 1% if we are reading
      // sequentially
      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
      EXPECT_LT(error_pct, 1);
    }

    delete iter;
  }

  // Read the block sequentially using Seek()
  {
    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();

    // create block reader
    BlockContents contents;
    contents.data = rawblock;
    contents.cachable = true;
    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
                 kBytesPerBit, stats.get());

    size_t read_bytes = 0;
    DataBlockIter *iter =
        static_cast<DataBlockIter *>(reader.NewIterator<DataBlockIter>(
            options.comparator, options.comparator, nullptr, stats.get()));
    for (int i = 0; i < num_records; i++) {
      Slice k(keys[i]);

      // search in block for this key
      iter->Seek(k);
      iter->value();
      read_bytes += iter->TEST_CurrentEntrySize();

      double semi_acc_read_amp =
          static_cast<double>(read_bytes) / rawblock.size();
      double read_amp = static_cast<double>(stats->getTickerCount(
                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);

      // Error in read amplification will be less than 1% if we are reading
      // sequentially
      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
      EXPECT_LT(error_pct, 1);
    }
    delete iter;
  }

  // Read the block randomly
  {
    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();

    // create block reader
    BlockContents contents;
    contents.data = rawblock;
    contents.cachable = true;
    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
                 kBytesPerBit, stats.get());

    size_t read_bytes = 0;
    DataBlockIter *iter =
        static_cast<DataBlockIter *>(reader.NewIterator<DataBlockIter>(
            options.comparator, options.comparator, nullptr, stats.get()));
    std::unordered_set<int> read_keys;
    for (int i = 0; i < num_records; i++) {
      int index = rnd.Uniform(num_records);
      Slice k(keys[index]);

      iter->Seek(k);
      iter->value();
      if (read_keys.find(index) == read_keys.end()) {
        read_keys.insert(index);
        read_bytes += iter->TEST_CurrentEntrySize();
      }

      double semi_acc_read_amp =
          static_cast<double>(read_bytes) / rawblock.size();
      double read_amp = static_cast<double>(stats->getTickerCount(
                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);

      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
      // Error in read amplification will be less than 2% if we are reading
      // randomly
      EXPECT_LT(error_pct, 2);
    }
    delete iter;
  }
}

TEST_F(BlockTest, ReadAmpBitmapPow2) {
  std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
  ASSERT_EQ(BlockReadAmpBitmap(100, 1, stats.get()).GetBytesPerBit(), 1);
  ASSERT_EQ(BlockReadAmpBitmap(100, 2, stats.get()).GetBytesPerBit(), 2);
  ASSERT_EQ(BlockReadAmpBitmap(100, 4, stats.get()).GetBytesPerBit(), 4);
  ASSERT_EQ(BlockReadAmpBitmap(100, 8, stats.get()).GetBytesPerBit(), 8);
  ASSERT_EQ(BlockReadAmpBitmap(100, 16, stats.get()).GetBytesPerBit(), 16);
  ASSERT_EQ(BlockReadAmpBitmap(100, 32, stats.get()).GetBytesPerBit(), 32);
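
  // bytes_per_bit values that are not powers of two are rounded down to the
  // nearest power of two.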
  ASSERT_EQ(BlockReadAmpBitmap(100, 3, stats.get()).GetBytesPerBit(), 2);
  ASSERT_EQ(BlockReadAmpBitmap(100, 7, stats.get()).GetBytesPerBit(), 4);
  ASSERT_EQ(BlockReadAmpBitmap(100, 11, stats.get()).GetBytesPerBit(), 8);
  ASSERT_EQ(BlockReadAmpBitmap(100, 17, stats.get()).GetBytesPerBit(), 16);
  ASSERT_EQ(BlockReadAmpBitmap(100, 33, stats.get()).GetBytesPerBit(), 32);
  ASSERT_EQ(BlockReadAmpBitmap(100, 35, stats.get()).GetBytesPerBit(), 32);
}

}  // namespace rocksdb

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}