// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Decodes the blocks generated by block_builder.cc.
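//
// For reference (summarized from the comment in block_builder.cc), a block
// is laid out as:
//     entry 0 .. entry N-1     (keys prefix-compressed against the previous)
//     restarts: uint32[num_restarts]   (byte offsets of "restart" entries,
//                                       whose keys are stored in full)
//     num_restarts: uint32     (trailer read by Block::NumRestarts())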

#include "table/block.h"

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

#include "rocksdb/comparator.h"
#include "table/format.h"
#include "table/block_hash_index.h"
#include "table/block_prefix_index.h"
#include "util/coding.h"
#include "util/logging.h"

namespace rocksdb {

// Helper routine: decode the next block entry starting at "p",
// storing the number of shared key bytes, non_shared key bytes,
// and the length of the value in "*shared", "*non_shared", and
// "*value_length", respectively. Will not dereference past "limit".
//
// If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
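//
// Each entry is encoded as (see block_builder.cc):
//     shared_bytes:   varint32
//     unshared_bytes: varint32
//     value_length:   varint32
//     key_delta:      char[unshared_bytes]
//     value:          char[value_length]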
static inline const char* DecodeEntry(const char* p, const char* limit,
                                      uint32_t* shared,
                                      uint32_t* non_shared,
                                      uint32_t* value_length) {
  if (limit - p < 3) return nullptr;
  *shared = reinterpret_cast<const unsigned char*>(p)[0];
  *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
  *value_length = reinterpret_cast<const unsigned char*>(p)[2];
  if ((*shared | *non_shared | *value_length) < 128) {
    // Fast path: all three values are encoded in one byte each
    p += 3;
  } else {
    if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
    if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
    if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
  }

  if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
    return nullptr;
  }
  return p;
}

void BlockIter::Next() {
  assert(Valid());
  ParseNextKey();
}

void BlockIter::Prev() {
  assert(Valid());

  // Scan backwards to a restart point before current_
  const uint32_t original = current_;
  while (GetRestartPoint(restart_index_) >= original) {
    if (restart_index_ == 0) {
      // No more entries
      current_ = restarts_;
      restart_index_ = num_restarts_;
      return;
    }
    restart_index_--;
  }

  SeekToRestartPoint(restart_index_);
  do {
    // Loop until end of current entry hits the start of original entry
  } while (ParseNextKey() && NextEntryOffset() < original);
}
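
// Positions the iterator at the first entry with a key >= target, or marks
// it invalid when the hash or prefix index rules the key out. The index
// lookup picks a restart point; a linear scan from there finishes the job.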
void BlockIter::Seek(const Slice& target) {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  uint32_t index = 0;
  bool ok = false;
  if (prefix_index_) {
    ok = PrefixSeek(target, &index);
  } else {
    ok = hash_index_ ? HashSeek(target, &index)
                     : BinarySeek(target, 0, num_restarts_ - 1, &index);
  }

  if (!ok) {
    return;
  }
  SeekToRestartPoint(index);
  // Linear search (within restart block) for first key >= target

  while (true) {
    if (!ParseNextKey() || Compare(key_.GetKey(), target) >= 0) {
      return;
    }
  }
}

void BlockIter::SeekToFirst() {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  SeekToRestartPoint(0);
  ParseNextKey();
}

void BlockIter::SeekToLast() {
  if (data_ == nullptr) {  // Not init yet
    return;
  }
  SeekToRestartPoint(num_restarts_ - 1);
  while (ParseNextKey() && NextEntryOffset() < restarts_) {
    // Keep skipping
  }
}

void BlockIter::CorruptionError() {
  current_ = restarts_;
  restart_index_ = num_restarts_;
  status_ = Status::Corruption("bad entry in block");
  key_.Clear();
  value_.clear();
}

bool BlockIter::ParseNextKey() {
  current_ = NextEntryOffset();
  const char* p = data_ + current_;
  const char* limit = data_ + restarts_;  // Restarts come right after data
  if (p >= limit) {
    // No more entries to return. Mark as invalid.
    current_ = restarts_;
    restart_index_ = num_restarts_;
    return false;
  }

  // Decode next entry
  uint32_t shared, non_shared, value_length;
  p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
  if (p == nullptr || key_.Size() < shared) {
    CorruptionError();
    return false;
} else {
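    // Reconstruct the full key from the shared prefix of the previous key
    // plus this entry's delta. For example (illustrative values only): if
    // the previous key is "apple" and the entry encodes shared=3,
    // non_shared=2 with key delta "ri", the new key is "app" + "ri".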
    key_.TrimAppend(shared, p, non_shared);
    value_ = Slice(p + non_shared, value_length);
    while (restart_index_ + 1 < num_restarts_ &&
           GetRestartPoint(restart_index_ + 1) < current_) {
      ++restart_index_;
    }
    return true;
  }
}

// Binary search in the restart array to find the last restart point with a
// key <= target (the leftmost index is returned even when every restart key
// is greater than target, since the search cannot go further left). The
// caller then scans forward from that restart point for the first
// key >= target.
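// For example (illustrative keys): with restart keys {"b", "d", "f"} and
// target "e", BinarySeek settles on the restart point of "d", and the
// caller's forward scan finds the first key >= "e" from there.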
bool BlockIter::BinarySeek(const Slice& target, uint32_t left, uint32_t right,
                           uint32_t* index) {
  assert(left <= right);

  while (left < right) {
    uint32_t mid = (left + right + 1) / 2;
    uint32_t region_offset = GetRestartPoint(mid);
uint32_t shared, non_shared, value_length;
    const char* key_ptr =
        DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
                    &non_shared, &value_length);
if (key_ptr == nullptr || (shared != 0)) {
      CorruptionError();
      return false;
    }
    Slice mid_key(key_ptr, non_shared);
    int cmp = Compare(mid_key, target);
    if (cmp < 0) {
      // Key at "mid" is smaller than "target". Therefore all
      // blocks before "mid" are uninteresting.
      left = mid;
    } else if (cmp > 0) {
      // Key at "mid" is larger than "target". Therefore all blocks at or
      // after "mid" are uninteresting.
      right = mid - 1;
    } else {
      left = right = mid;
    }
  }

  *index = left;
  return true;
}

// Compare the target key against the restart-point key (i.e. the first
// key) of the block with index `block_index`. On corruption, sets the
// iterator status and returns 1, treating the target as smaller; callers
// should check status_ to distinguish the two cases.
int BlockIter::CompareBlockKey(uint32_t block_index, const Slice& target) {
  uint32_t region_offset = GetRestartPoint(block_index);
  uint32_t shared, non_shared, value_length;
  const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_,
                                    &shared, &non_shared, &value_length);
  if (key_ptr == nullptr || (shared != 0)) {
    CorruptionError();
    return 1;  // Return target is smaller
  }
  Slice block_key(key_ptr, non_shared);
  return Compare(block_key, target);
}

// Binary search in block_ids to find the first block
// with a key >= target
bool BlockIter::BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
                                     uint32_t left, uint32_t right,
                                     uint32_t* index) {
  assert(left <= right);
  uint32_t left_bound = left;

while (left <= right) {
    uint32_t mid = (left + right) / 2;

    int cmp = CompareBlockKey(block_ids[mid], target);
    if (!status_.ok()) {
      return false;
    }
if (cmp < 0) {
      // Key at "mid" is smaller than "target". Therefore all
      // blocks before or at "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid" is >= "target". Therefore all blocks
      // after "mid" are uninteresting.
      // If there is only one block left, we found it.
      if (left == right) break;
      right = mid;
    }
  }

  if (left == right) {
    // We may reach here in one of two cases:
    // (1) left is the first index in block_ids, or
    // (2) there is a gap between the block at `left` and the block at
    //     `left - 1` (their block ids are not consecutive).
    // In either case we can further distinguish "key is in this block"
    // from "key does not exist at all" by comparing the target against
    // the first key of the physical block just before block_ids[left].
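    // For example (illustrative ids): with block_ids = {5, 8} and `left`
    // at block 8, block 7 is not part of this prefix bucket; if block 7
    // already starts with a key greater than the target, the target cannot
    // be in block 8 either, so the match was a hash false positive.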
if (block_ids[left] > 0 &&
        (left == left_bound || block_ids[left - 1] != block_ids[left] - 1) &&
        CompareBlockKey(block_ids[left] - 1, target) > 0) {
      current_ = restarts_;
      return false;
    }

    *index = block_ids[left];
    return true;
  } else {
    assert(left > right);
    // Mark iterator invalid
    current_ = restarts_;
    return false;
  }
}

bool BlockIter::HashSeek(const Slice& target, uint32_t* index) {
  assert(hash_index_);
  auto restart_index = hash_index_->GetRestartIndex(target);
  if (restart_index == nullptr) {
    current_ = restarts_;
    return false;
  }

  // The restart points in [first_index, first_index + num_blocks) all hold
  // keys sharing the same prefix, so a binary search within that small
  // range suffices.
auto left = restart_index->first_index;
  auto right = restart_index->first_index + restart_index->num_blocks - 1;
  return BinarySeek(target, left, right, index);
}

bool BlockIter::PrefixSeek(const Slice& target, uint32_t* index) {
  assert(prefix_index_);
  uint32_t* block_ids = nullptr;
  uint32_t num_blocks = prefix_index_->GetBlocks(target, &block_ids);

if (num_blocks == 0) {
    current_ = restarts_;
    return false;
  } else {
    return BinaryBlockIndexSeek(target, block_ids, 0, num_blocks - 1, index);
  }
}

uint32_t Block::NumRestarts() const {
  assert(size_ >= 2*sizeof(uint32_t));
  return DecodeFixed32(data_ + size_ - sizeof(uint32_t));
}

Block::Block(BlockContents&& contents)
    : contents_(std::move(contents)),
      data_(contents_.data.data()),
      size_(contents_.data.size()) {
  if (size_ < sizeof(uint32_t)) {
    size_ = 0;  // Error marker
  } else {
    restart_offset_ =
        static_cast<uint32_t>(size_) - (1 + NumRestarts()) * sizeof(uint32_t);
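    // The check below catches a bogus NumRestarts() that makes the
    // subtraction above wrap around. For example (illustrative numbers),
    // size_ = 8 can hold at most one restart, so a decoded count of 5
    // yields 8 - 6 * 4, which wraps to a huge uint32_t and is rejected.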
    if (restart_offset_ > size_ - sizeof(uint32_t)) {
      // The size is too small for NumRestarts() and therefore
      // restart_offset_ wrapped around.
      size_ = 0;
    }
  }
}

Iterator* Block::NewIterator(
    const Comparator* cmp, BlockIter* iter, bool total_order_seek) {
  if (size_ < 2*sizeof(uint32_t)) {
    if (iter != nullptr) {
      iter->SetStatus(Status::Corruption("bad block contents"));
      return iter;
    } else {
      return NewErrorIterator(Status::Corruption("bad block contents"));
    }
  }
  const uint32_t num_restarts = NumRestarts();
  if (num_restarts == 0) {
    if (iter != nullptr) {
      iter->SetStatus(Status::OK());
      return iter;
    } else {
      return NewEmptyIterator();
    }
} else {
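    // When total_order_seek is set, pass null indexes so that Seek() falls
    // back to plain binary search and honors the comparator's total order
    // instead of taking the hash/prefix shortcuts.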
    BlockHashIndex* hash_index_ptr =
        total_order_seek ? nullptr : hash_index_.get();
    BlockPrefixIndex* prefix_index_ptr =
        total_order_seek ? nullptr : prefix_index_.get();

    if (iter != nullptr) {
      iter->Initialize(cmp, data_, restart_offset_, num_restarts,
                       hash_index_ptr, prefix_index_ptr);
    } else {
      iter = new BlockIter(cmp, data_, restart_offset_, num_restarts,
                           hash_index_ptr, prefix_index_ptr);
    }
  }

  return iter;
}

void Block::SetBlockHashIndex(BlockHashIndex* hash_index) {
  hash_index_.reset(hash_index);
}

void Block::SetBlockPrefixIndex(BlockPrefixIndex* prefix_index) {
  prefix_index_.reset(prefix_index);
}

size_t Block::ApproximateMemoryUsage() const {
  size_t usage = size();
  if (hash_index_) {
    usage += hash_index_->ApproximateMemoryUsage();
  }
  if (prefix_index_) {
    usage += prefix_index_->ApproximateMemoryUsage();
  }
  return usage;
}

} // namespace rocksdb