2013-10-29 04:34:02 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
2014-04-15 22:39:26 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2014-07-19 01:58:13 +02:00
|
|
|
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "table/plain_table_reader.h"
|
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
#include <string>
|
2014-02-14 00:27:59 +01:00
|
|
|
#include <vector>
|
2013-10-29 04:34:02 +01:00
|
|
|
|
|
|
|
#include "db/dbformat.h"
|
|
|
|
|
|
|
|
#include "rocksdb/cache.h"
|
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/filter_policy.h"
|
|
|
|
#include "rocksdb/options.h"
|
|
|
|
#include "rocksdb/statistics.h"
|
|
|
|
|
|
|
|
#include "table/block.h"
|
2014-07-19 01:58:13 +02:00
|
|
|
#include "table/bloom_block.h"
|
2014-08-16 00:05:09 +02:00
|
|
|
#include "table/filter_block.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "table/format.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/internal_iterator.h"
|
2013-12-06 01:51:26 +01:00
|
|
|
#include "table/meta_blocks.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "table/two_level_iterator.h"
|
2014-01-28 06:58:46 +01:00
|
|
|
#include "table/plain_table_factory.h"
|
2014-06-19 01:36:48 +02:00
|
|
|
#include "table/plain_table_key_coding.h"
|
2014-09-29 20:09:09 +02:00
|
|
|
#include "table/get_context.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
|
2014-05-04 22:55:53 +02:00
|
|
|
#include "util/arena.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "util/coding.h"
|
2013-12-20 18:35:24 +01:00
|
|
|
#include "util/dynamic_bloom.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "util/hash.h"
|
|
|
|
#include "util/histogram.h"
|
2013-11-21 20:11:02 +01:00
|
|
|
#include "util/murmurhash.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
#include "util/perf_context_imp.h"
|
|
|
|
#include "util/stop_watch.h"
|
2014-11-25 05:44:49 +01:00
|
|
|
#include "util/string_util.h"
|
2013-10-29 04:34:02 +01:00
|
|
|
|
2013-11-21 20:11:02 +01:00
|
|
|
|
2013-10-29 04:34:02 +01:00
|
|
|
namespace rocksdb {
|
|
|
|
|
2014-01-25 06:10:19 +01:00
|
|
|
namespace {
|
2013-12-20 18:35:24 +01:00
|
|
|
|
2014-02-14 00:27:59 +01:00
|
|
|
// Safely getting a uint32_t element from a char array, where, starting from
|
|
|
|
// `base`, every 4 bytes are considered as an fixed 32 bit integer.
|
|
|
|
inline uint32_t GetFixed32Element(const char* base, size_t offset) {
|
|
|
|
return DecodeFixed32(base + offset * sizeof(uint32_t));
|
|
|
|
}
|
2014-01-25 06:10:19 +01:00
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Iterator to iterate IndexedTable
|
2015-10-13 00:06:38 +02:00
|
|
|
class PlainTableIterator : public InternalIterator {
|
2014-01-25 06:10:19 +01:00
|
|
|
public:
|
2014-02-08 01:25:38 +01:00
|
|
|
explicit PlainTableIterator(PlainTableReader* table, bool use_prefix_seek);
|
2014-01-25 06:10:19 +01:00
|
|
|
~PlainTableIterator();
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
bool Valid() const override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void SeekToFirst() override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void SeekToLast() override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Seek(const Slice& target) override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Next() override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Prev() override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Slice key() const override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Slice value() const override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Status status() const override;
|
2014-01-25 06:10:19 +01:00
|
|
|
|
|
|
|
private:
|
|
|
|
PlainTableReader* table_;
|
2014-06-19 01:36:48 +02:00
|
|
|
PlainTableKeyDecoder decoder_;
|
2014-02-08 01:25:38 +01:00
|
|
|
bool use_prefix_seek_;
|
2014-01-25 06:10:19 +01:00
|
|
|
uint32_t offset_;
|
|
|
|
uint32_t next_offset_;
|
2014-06-19 01:36:48 +02:00
|
|
|
Slice key_;
|
2014-01-25 06:10:19 +01:00
|
|
|
Slice value_;
|
|
|
|
Status status_;
|
|
|
|
// No copying allowed
|
|
|
|
PlainTableIterator(const PlainTableIterator&) = delete;
|
|
|
|
void operator=(const Iterator&) = delete;
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const uint64_t kPlainTableMagicNumber;
|
2014-09-05 01:18:36 +02:00
|
|
|
// Constructs a reader over an already-opened plain-table file.
// `table_properties` is only read here (fixed_key_len, data_size); ownership
// is NOT taken — table_properties_ starts as nullptr and is set later by
// PopulateIndex(). `file` ownership moves into file_info_.
PlainTableReader::PlainTableReader(const ImmutableCFOptions& ioptions,
                                   unique_ptr<RandomAccessFileReader>&& file,
                                   const EnvOptions& storage_options,
                                   const InternalKeyComparator& icomparator,
                                   EncodingType encoding_type,
                                   uint64_t file_size,
                                   const TableProperties* table_properties)
    : internal_comparator_(icomparator),
      encoding_type_(encoding_type),
      full_scan_mode_(false),
      user_key_len_(static_cast<uint32_t>(table_properties->fixed_key_len)),
      prefix_extractor_(ioptions.prefix_extractor),
      enable_bloom_(false),
      // 6 bits per key is a placeholder; the bloom is (re)configured in
      // PopulateIndex()/AllocateAndFillBloom() before use.
      bloom_(6, nullptr),
      file_info_(std::move(file), storage_options,
                 static_cast<uint32_t>(table_properties->data_size)),
      ioptions_(ioptions),
      file_size_(file_size),
      table_properties_(nullptr) {}
|
2013-10-29 04:34:02 +01:00
|
|
|
|
|
|
|
// All owned resources (file reader, index arena, table properties) are
// released by member destructors; nothing extra to do here.
PlainTableReader::~PlainTableReader() {
}
|
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
// Opens a plain-table file and builds a ready-to-use TableReader.
// Reads table properties, validates the prefix extractor against the one
// recorded in the file, optionally mmaps the data, and builds (or loads)
// the index unless `full_scan_mode` is set.
// On success, *table_reader owns the new reader. `file` is consumed.
Status PlainTableReader::Open(const ImmutableCFOptions& ioptions,
                              const EnvOptions& env_options,
                              const InternalKeyComparator& internal_comparator,
                              unique_ptr<RandomAccessFileReader>&& file,
                              uint64_t file_size,
                              unique_ptr<TableReader>* table_reader,
                              const int bloom_bits_per_key,
                              double hash_table_ratio, size_t index_sparseness,
                              size_t huge_page_tlb_size, bool full_scan_mode) {
  // Offsets in the index are 32-bit, so larger files cannot be addressed.
  if (file_size > PlainTableIndex::kMaxFileSize) {
    return Status::NotSupported("File is too large for PlainTableReader!");
  }

  TableProperties* props = nullptr;
  auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
                               ioptions, &props);
  if (!s.ok()) {
    return s;
  }
  // ReadTableProperties heap-allocates `props`. Ownership is transferred to
  // the reader inside PopulateIndex(); until then (and on every early return,
  // including full-scan mode where PopulateIndex is never called) this guard
  // prevents a leak that existed before.
  std::unique_ptr<TableProperties> props_guard(props);

  assert(hash_table_ratio >= 0.0);
  auto& user_props = props->user_collected_properties;
  auto prefix_extractor_in_file = props->prefix_extractor_name;

  if (!full_scan_mode &&
      !prefix_extractor_in_file.empty() /* old version sst file*/
      && prefix_extractor_in_file != "nullptr") {
    if (!ioptions.prefix_extractor) {
      return Status::InvalidArgument(
          "Prefix extractor is missing when opening a PlainTable built "
          "using a prefix extractor");
    } else if (prefix_extractor_in_file.compare(
                   ioptions.prefix_extractor->Name()) != 0) {
      return Status::InvalidArgument(
          "Prefix extractor given doesn't match the one used to build "
          "PlainTable");
    }
  }

  // Default to plain encoding if the property is absent (older files).
  EncodingType encoding_type = kPlain;
  auto encoding_type_prop =
      user_props.find(PlainTablePropertyNames::kEncodingType);
  if (encoding_type_prop != user_props.end()) {
    encoding_type = static_cast<EncodingType>(
        DecodeFixed32(encoding_type_prop->second.c_str()));
  }

  std::unique_ptr<PlainTableReader> new_reader(new PlainTableReader(
      ioptions, std::move(file), env_options, internal_comparator,
      encoding_type, file_size, props));

  s = new_reader->MmapDataIfNeeded();
  if (!s.ok()) {
    return s;
  }

  if (!full_scan_mode) {
    // PopulateIndex takes ownership of `props` (it resets table_properties_),
    // so release the guard at the call site.
    s = new_reader->PopulateIndex(props_guard.release(), bloom_bits_per_key,
                                  hash_table_ratio, index_sparseness,
                                  huge_page_tlb_size);
    if (!s.ok()) {
      return s;
    }
  } else {
    // Flag to indicate it is a full scan mode so that none of the indexes
    // can be used. `props` is no longer needed and is freed by the guard.
    new_reader->full_scan_mode_ = true;
  }

  *table_reader = std::move(new_reader);
  return s;
}
|
|
|
|
|
|
|
|
// Intentionally a no-op: this reader performs no special preparation for
// compaction-time access.
void PlainTableReader::SetupForCompaction() {
}
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
// Creates an iterator over this table. If `arena` is given, the iterator is
// placement-constructed inside it (caller must not `delete` it); otherwise it
// is heap-allocated. `skip_filters` is unused by plain table.
InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options,
                                                Arena* arena,
                                                bool skip_filters) {
  // A total-order seek is only valid when the table itself was built in
  // total-order mode.
  if (options.total_order_seek && !IsTotalOrderMode()) {
    return NewErrorInternalIterator(
        Status::InvalidArgument("total_order_seek not supported"), arena);
  }
  const bool use_prefix_seek = prefix_extractor_ != nullptr;
  if (arena != nullptr) {
    auto buf = arena->AllocateAligned(sizeof(PlainTableIterator));
    return new (buf) PlainTableIterator(this, use_prefix_seek);
  }
  return new PlainTableIterator(this, use_prefix_seek);
}
|
|
|
|
|
2014-07-19 01:58:13 +02:00
|
|
|
// Scans the whole data section once, feeding every key's prefix into
// `index_builder` and collecting one hash per distinct prefix into
// `prefix_hashes` (used later to fill the bloom filter). When the bloom
// filter is already sized (total-order mode), per-user-key hashes are added
// to it directly instead. Finishes by initializing index_ from the builder.
Status PlainTableReader::PopulateIndexRecordList(
    PlainTableIndexBuilder* index_builder, vector<uint32_t>* prefix_hashes) {
  Slice prev_key_prefix_slice;
  // Backing storage for prev_key_prefix_slice in non-mmap mode, where the
  // decoded slice's memory is not stable across iterations.
  std::string prev_key_prefix_buf;
  uint32_t pos = data_start_offset_;

  bool is_first_record = true;
  Slice key_prefix_slice;
  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
                               ioptions_.prefix_extractor);
  while (pos < file_info_.data_end_offset) {
    uint32_t key_offset = pos;
    ParsedInternalKey key;
    Slice value_slice;
    bool seekable = false;
    Status s = Next(&decoder, &pos, &key, nullptr, &value_slice, &seekable);
    if (!s.ok()) {
      return s;
    }

    key_prefix_slice = GetPrefix(key);
    if (enable_bloom_) {
      // Bloom is already allocated (total-order mode): hash full user keys.
      bloom_.AddHash(GetSliceHash(key.user_key));
    } else {
      if (is_first_record || prev_key_prefix_slice != key_prefix_slice) {
        // A new prefix begins; record the hash of the one just finished.
        if (!is_first_record) {
          prefix_hashes->push_back(GetSliceHash(prev_key_prefix_slice));
        }
        if (file_info_.is_mmap_mode) {
          prev_key_prefix_slice = key_prefix_slice;
        } else {
          prev_key_prefix_buf = key_prefix_slice.ToString();
          prev_key_prefix_slice = prev_key_prefix_buf;
        }
      }
    }

    index_builder->AddKeyPrefix(GetPrefix(key), key_offset);

    // The first key of every prefix must be seekable, or lookups cannot
    // position on this prefix at all.
    if (!seekable && is_first_record) {
      return Status::Corruption("Key for a prefix is not seekable");
    }

    is_first_record = false;
  }

  // Record the hash for the final prefix — but only if at least one record
  // was seen. Previously an empty data section pushed the hash of a
  // default-constructed slice, recording a phantom prefix.
  if (!is_first_record) {
    prefix_hashes->push_back(GetSliceHash(key_prefix_slice));
  }
  auto s = index_.InitFromRawData(index_builder->Finish());
  return s;
}
|
|
|
|
|
2014-07-19 01:58:13 +02:00
|
|
|
void PlainTableReader::AllocateAndFillBloom(int bloom_bits_per_key,
|
|
|
|
int num_prefixes,
|
|
|
|
size_t huge_page_tlb_size,
|
|
|
|
vector<uint32_t>* prefix_hashes) {
|
|
|
|
if (!IsTotalOrderMode()) {
|
2014-06-09 21:30:19 +02:00
|
|
|
uint32_t bloom_total_bits = num_prefixes * bloom_bits_per_key;
|
2014-02-08 01:25:38 +01:00
|
|
|
if (bloom_total_bits > 0) {
|
2014-06-09 21:30:19 +02:00
|
|
|
enable_bloom_ = true;
|
2014-09-05 01:18:36 +02:00
|
|
|
bloom_.SetTotalBits(&arena_, bloom_total_bits, ioptions_.bloom_locality,
|
|
|
|
huge_page_tlb_size, ioptions_.info_log);
|
2014-07-19 01:58:13 +02:00
|
|
|
FillBloom(prefix_hashes);
|
2014-02-08 01:25:38 +01:00
|
|
|
}
|
|
|
|
}
|
2013-12-20 18:35:24 +01:00
|
|
|
}
|
2013-11-21 20:11:02 +01:00
|
|
|
|
2014-07-19 01:58:13 +02:00
|
|
|
// Inserts every pre-computed prefix hash into the (already sized) bloom
// filter. The filter must have been initialized via SetTotalBits first.
void PlainTableReader::FillBloom(vector<uint32_t>* prefix_hashes) {
  assert(bloom_.IsInitialized());
  for (auto it = prefix_hashes->begin(); it != prefix_hashes->end(); ++it) {
    bloom_.AddHash(*it);
  }
}
|
|
|
|
|
2015-09-17 01:57:43 +02:00
|
|
|
// In mmap mode, maps the whole file into file_info_.file_data so later
// decoding can read it directly; otherwise this is a no-op.
Status PlainTableReader::MmapDataIfNeeded() {
  if (!file_info_.is_mmap_mode) {
    return Status::OK();
  }
  // Get mmapped memory.
  return file_info_.file->Read(0, file_size_, &file_info_.file_data, nullptr);
}
|
|
|
|
|
2014-06-09 21:30:19 +02:00
|
|
|
// Builds or loads the lookup index and bloom filter for this table.
// Takes ownership of `props`. If the file already stores both a bloom meta
// block and an index meta block, they are loaded directly; otherwise the data
// section is scanned (PopulateIndexRecordList) to construct them. Finally,
// index-size statistics are written back into the user-collected properties.
Status PlainTableReader::PopulateIndex(TableProperties* props,
                                       int bloom_bits_per_key,
                                       double hash_table_ratio,
                                       size_t index_sparseness,
                                       size_t huge_page_tlb_size) {
  assert(props != nullptr);
  // Ownership of the properties object transfers to this reader here.
  table_properties_.reset(props);

  BlockContents bloom_block_contents;
  auto s = ReadMetaBlock(file_info_.file.get(), file_size_,
                         kPlainTableMagicNumber, ioptions_,
                         BloomBlockBuilder::kBloomBlock, &bloom_block_contents);
  bool index_in_file = s.ok();

  BlockContents index_block_contents;
  s = ReadMetaBlock(
      file_info_.file.get(), file_size_, kPlainTableMagicNumber, ioptions_,
      PlainTableIndexBuilder::kPlainTableIndexBlock, &index_block_contents);

  index_in_file &= s.ok();

  Slice* bloom_block;
  if (index_in_file) {
    // If bloom_block_contents.allocation is not empty (which will be the case
    // for non-mmap mode), it holds the allocated memory for the bloom block.
    // It needs to be kept alive to keep `bloom_block` valid.
    bloom_block_alloc_ = std::move(bloom_block_contents.allocation);
    bloom_block = &bloom_block_contents.data;
  } else {
    bloom_block = nullptr;
  }

  // index_in_file == true only if there are kBloomBlock and
  // kPlainTableIndexBlock in file
  Slice* index_block;
  if (index_in_file) {
    // If index_block_contents.allocation is not empty (which will be the case
    // for non-mmap mode), it holds the allocated memory for the index block.
    // It needs to be kept alive to keep `index_block` valid.
    index_block_alloc_ = std::move(index_block_contents.allocation);
    index_block = &index_block_contents.data;
  } else {
    index_block = nullptr;
  }

  if ((ioptions_.prefix_extractor == nullptr) &&
      (hash_table_ratio != 0)) {
    // ioptions.prefix_extractor is required for a hash-based look-up.
    return Status::NotSupported(
        "PlainTable requires a prefix extractor enable prefix hash mode.");
  }

  // First, read the whole file, for every kIndexIntervalForSamePrefixKeys rows
  // for a prefix (starting from the first one), generate a record of (hash,
  // offset) and append it to IndexRecordList, which is a data structure created
  // to store them.

  if (!index_in_file) {
    // Allocate bloom filter here for total order mode.
    if (IsTotalOrderMode()) {
      uint32_t num_bloom_bits =
          static_cast<uint32_t>(table_properties_->num_entries) *
          bloom_bits_per_key;
      if (num_bloom_bits > 0) {
        enable_bloom_ = true;
        bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality,
                            huge_page_tlb_size, ioptions_.info_log);
      }
    }
  } else {
    // Bloom data is stored in the file: attach to it in place instead of
    // rebuilding.
    enable_bloom_ = true;
    auto num_blocks_property = props->user_collected_properties.find(
        PlainTablePropertyNames::kNumBloomBlocks);

    uint32_t num_blocks = 0;
    if (num_blocks_property != props->user_collected_properties.end()) {
      Slice temp_slice(num_blocks_property->second);
      if (!GetVarint32(&temp_slice, &num_blocks)) {
        num_blocks = 0;
      }
    }
    // cast away const qualifier, because bloom_ won't be changed
    bloom_.SetRawData(
        const_cast<unsigned char*>(
            reinterpret_cast<const unsigned char*>(bloom_block->data())),
        static_cast<uint32_t>(bloom_block->size()) * 8, num_blocks);
  }

  PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness,
                                       hash_table_ratio, huge_page_tlb_size);

  std::vector<uint32_t> prefix_hashes;
  if (!index_in_file) {
    // Scan the data section to build the index and collect prefix hashes.
    s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
    if (!s.ok()) {
      return s;
    }
  } else {
    // Load the pre-built index directly from the meta block.
    s = index_.InitFromRawData(*index_block);
    if (!s.ok()) {
      return s;
    }
  }

  if (!index_in_file) {
    // Calculated bloom filter size and allocate memory for
    // bloom filter based on the number of prefixes, then fill it.
    AllocateAndFillBloom(bloom_bits_per_key, index_.GetNumPrefixes(),
                         huge_page_tlb_size, &prefix_hashes);
  }

  // Fill two table properties.
  if (!index_in_file) {
    props->user_collected_properties["plain_table_hash_table_size"] =
        ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen);
    props->user_collected_properties["plain_table_sub_index_size"] =
        ToString(index_.GetSubIndexSize());
  } else {
    props->user_collected_properties["plain_table_hash_table_size"] =
        ToString(0);
    props->user_collected_properties["plain_table_sub_index_size"] =
        ToString(0);
  }

  return Status::OK();
}
|
|
|
|
|
2015-11-18 03:29:40 +01:00
|
|
|
// Resolves `target` (an internal key) to a file offset to start scanning
// from, using the prefix-hash index. On return, `prefix_matched` tells
// whether the entry at *offset is known to share `target`'s prefix; if the
// prefix does not exist, *offset is set to data_end_offset (i.e. "not found").
Status PlainTableReader::GetOffset(PlainTableKeyDecoder* decoder,
                                   const Slice& target, const Slice& prefix,
                                   uint32_t prefix_hash, bool& prefix_matched,
                                   uint32_t* offset) const {
  prefix_matched = false;
  uint32_t prefix_index_offset;
  auto res = index_.GetOffset(prefix_hash, &prefix_index_offset);
  if (res == PlainTableIndex::kNoPrefixForBucket) {
    // No entry with this prefix hash: report end-of-data.
    *offset = file_info_.data_end_offset;
    return Status::OK();
  } else if (res == PlainTableIndex::kDirectToFile) {
    // Bucket holds a single file offset; no sub-index to search.
    *offset = prefix_index_offset;
    return Status::OK();
  }

  // point to sub-index, need to do a binary search
  uint32_t upper_bound;
  const char* base_ptr =
      index_.GetSubIndexBasePtrAndUpperBound(prefix_index_offset, &upper_bound);
  uint32_t low = 0;
  uint32_t high = upper_bound;
  ParsedInternalKey mid_key;
  ParsedInternalKey parsed_target;
  if (!ParseInternalKey(target, &parsed_target)) {
    return Status::Corruption(Slice());
  }

  // The key is between [low, high). Do a binary search between it.
  while (high - low > 1) {
    uint32_t mid = (high + low) / 2;
    // Each sub-index element is a fixed 32-bit file offset; decode the key
    // stored there to compare against the target.
    uint32_t file_offset = GetFixed32Element(base_ptr, mid);
    uint32_t tmp;
    Status s = decoder->NextKeyNoValue(file_offset, &mid_key, nullptr, &tmp);
    if (!s.ok()) {
      return s;
    }
    int cmp_result = internal_comparator_.Compare(mid_key, parsed_target);
    if (cmp_result < 0) {
      low = mid;
    } else {
      if (cmp_result == 0) {
        // Happen to have found the exact key or target is smaller than the
        // first key after base_offset.
        prefix_matched = true;
        *offset = file_offset;
        return Status::OK();
      } else {
        high = mid;
      }
    }
  }
  // Both of the key at the position low or low+1 could share the same
  // prefix as target. We need to rule out one of them to avoid going
  // to the wrong prefix.
  ParsedInternalKey low_key;
  uint32_t tmp;
  uint32_t low_key_offset = GetFixed32Element(base_ptr, low);
  Status s = decoder->NextKeyNoValue(low_key_offset, &low_key, nullptr, &tmp);
  if (!s.ok()) {
    return s;
  }

  if (GetPrefix(low_key) == prefix) {
    prefix_matched = true;
    *offset = low_key_offset;
  } else if (low + 1 < upper_bound) {
    // There is possibly a next prefix, return it
    prefix_matched = false;
    *offset = GetFixed32Element(base_ptr, low + 1);
  } else {
    // target is larger than a key of the last prefix in this bucket
    // but with a different prefix. Key does not exist.
    *offset = file_info_.data_end_offset;
  }
  return Status::OK();
}
|
|
|
|
|
2014-02-08 01:25:38 +01:00
|
|
|
bool PlainTableReader::MatchBloom(uint32_t hash) const {
|
2015-10-07 20:23:20 +02:00
|
|
|
if (!enable_bloom_) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bloom_.MayContainHash(hash)) {
|
|
|
|
PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
|
|
|
|
return false;
|
|
|
|
}
|
2013-11-22 00:13:45 +01:00
|
|
|
}
|
|
|
|
|
2014-06-19 01:36:48 +02:00
|
|
|
// Decodes the entry at *offset and advances *offset past it. At exactly
// data_end_offset it returns OK without decoding (end of data); past the end
// it reports corruption. `internal_key`/`seekable` may be nullptr when the
// caller does not need them.
Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
                              ParsedInternalKey* parsed_key,
                              Slice* internal_key, Slice* value,
                              bool* seekable) const {
  const uint32_t end_offset = file_info_.data_end_offset;
  if (*offset >= end_offset) {
    if (*offset > end_offset) {
      return Status::Corruption("Offset is out of file size");
    }
    // Exactly at the end: nothing left to decode.
    return Status::OK();
  }

  uint32_t bytes_read;
  Status s = decoder->NextKey(*offset, parsed_key, internal_key, value,
                              &bytes_read, seekable);
  if (s.ok()) {
    *offset += bytes_read;
  }
  return s;
}
|
|
|
|
|
2014-06-12 19:06:18 +02:00
|
|
|
void PlainTableReader::Prepare(const Slice& target) {
|
|
|
|
if (enable_bloom_) {
|
|
|
|
uint32_t prefix_hash = GetSliceHash(GetPrefix(target));
|
|
|
|
bloom_.Prefetch(prefix_hash);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
// Point lookup for `target` (an internal key). Checks the bloom filter,
// locates the candidate bucket via the index, then linearly scans forward
// feeding matching entries to `get_context` until SaveValue() says stop or
// the prefix/range is exhausted. Returns non-OK only on decode/parse errors;
// "not found" is reported through get_context, not the status.
// `ro` and `skip_filters` are accepted for interface compatibility but are
// not consulted here.
Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
                             GetContext* get_context, bool skip_filters) {
  // Check bloom filter first.
  Slice prefix_slice;
  uint32_t prefix_hash;
  if (IsTotalOrderMode()) {
    if (full_scan_mode_) {
      status_ =
          Status::InvalidArgument("Get() is not allowed in full scan mode.");
      // BUG FIX: previously this fell through and kept scanning even though
      // full-scan mode has no usable index; report the error to the caller.
      return status_;
    }
    // Match whole user key for bloom filter check.
    if (!MatchBloom(GetSliceHash(GetUserKey(target)))) {
      return Status::OK();
    }
    // in total order mode, there is only one bucket 0, and we always use empty
    // prefix.
    prefix_slice = Slice();
    prefix_hash = 0;
  } else {
    prefix_slice = GetPrefix(target);
    prefix_hash = GetSliceHash(prefix_slice);
    if (!MatchBloom(prefix_hash)) {
      return Status::OK();
    }
  }
  uint32_t offset;
  bool prefix_match;
  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
                               ioptions_.prefix_extractor);
  Status s = GetOffset(&decoder, target, prefix_slice, prefix_hash,
                       prefix_match, &offset);

  if (!s.ok()) {
    return s;
  }
  ParsedInternalKey found_key;
  ParsedInternalKey parsed_target;
  if (!ParseInternalKey(target, &parsed_target)) {
    return Status::Corruption(Slice());
  }
  Slice found_value;
  while (offset < file_info_.data_end_offset) {
    s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
    if (!s.ok()) {
      return s;
    }
    if (!prefix_match) {
      // Need to verify prefix for the first key found if it is not yet
      // checked.
      if (GetPrefix(found_key) != prefix_slice) {
        // target is larger than any key with this prefix; key does not exist.
        return Status::OK();
      }
      prefix_match = true;
    }
    // TODO(ljin): since we know the key comparison result here,
    // can we enable the fast path?
    if (internal_comparator_.Compare(found_key, parsed_target) >= 0) {
      if (!get_context->SaveValue(found_key, found_value)) {
        break;
      }
    }
  }
  return Status::OK();
}
|
|
|
|
|
|
|
|
// PlainTable does not track per-key file offsets, so no estimate is
// available; this implementation always reports 0 regardless of `key`.
uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& key) {
  return 0;
}
|
|
|
|
|
2014-02-08 01:25:38 +01:00
|
|
|
// Constructs an iterator over `table`. `use_prefix_seek` records whether the
// caller is allowed to issue prefix-based Seek() calls; it is checked later
// in Seek(). The iterator starts out invalid: both cursors point just past
// the data region.
PlainTableIterator::PlainTableIterator(PlainTableReader* table,
                                       bool use_prefix_seek)
    : table_(table),
      decoder_(&table_->file_info_, table_->encoding_type_,
               table_->user_key_len_, table_->prefix_extractor_),
      use_prefix_seek_(use_prefix_seek) {
  next_offset_ = offset_ = table_->file_info_.data_end_offset;
}
|
|
|
|
|
|
|
|
// Nothing to release explicitly; members clean up via their own destructors.
PlainTableIterator::~PlainTableIterator() = default;
|
|
|
|
|
|
|
|
bool PlainTableIterator::Valid() const {
|
2015-09-17 01:57:43 +02:00
|
|
|
return offset_ < table_->file_info_.data_end_offset &&
|
|
|
|
offset_ >= table_->data_start_offset_;
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void PlainTableIterator::SeekToFirst() {
|
2013-11-21 20:11:02 +01:00
|
|
|
next_offset_ = table_->data_start_offset_;
|
2015-09-17 01:57:43 +02:00
|
|
|
if (next_offset_ >= table_->file_info_.data_end_offset) {
|
|
|
|
next_offset_ = offset_ = table_->file_info_.data_end_offset;
|
2013-12-20 18:35:24 +01:00
|
|
|
} else {
|
|
|
|
Next();
|
|
|
|
}
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void PlainTableIterator::SeekToLast() {
|
|
|
|
assert(false);
|
2014-02-08 01:25:38 +01:00
|
|
|
status_ = Status::NotSupported("SeekToLast() is not supported in PlainTable");
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void PlainTableIterator::Seek(const Slice& target) {
|
2014-02-08 01:25:38 +01:00
|
|
|
// If the user doesn't set prefix seek option and we are not able to do a
|
|
|
|
// total Seek(). assert failure.
|
2014-06-19 01:36:48 +02:00
|
|
|
if (!use_prefix_seek_) {
|
2014-07-19 01:58:13 +02:00
|
|
|
if (table_->full_scan_mode_) {
|
2014-06-19 01:36:48 +02:00
|
|
|
status_ =
|
|
|
|
Status::InvalidArgument("Seek() is not allowed in full scan mode.");
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-06-19 01:36:48 +02:00
|
|
|
return;
|
2014-07-19 01:58:13 +02:00
|
|
|
} else if (table_->GetIndexSize() > 1) {
|
2014-06-19 01:36:48 +02:00
|
|
|
assert(false);
|
|
|
|
status_ = Status::NotSupported(
|
|
|
|
"PlainTable cannot issue non-prefix seek unless in total order "
|
|
|
|
"mode.");
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-06-19 01:36:48 +02:00
|
|
|
return;
|
|
|
|
}
|
2014-02-08 01:25:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
Slice prefix_slice = table_->GetPrefix(target);
|
2014-04-02 00:00:48 +02:00
|
|
|
uint32_t prefix_hash = 0;
|
|
|
|
// Bloom filter is ignored in total-order mode.
|
|
|
|
if (!table_->IsTotalOrderMode()) {
|
2014-02-08 01:25:38 +01:00
|
|
|
prefix_hash = GetSliceHash(prefix_slice);
|
2014-04-02 00:00:48 +02:00
|
|
|
if (!table_->MatchBloom(prefix_hash)) {
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-04-02 00:00:48 +02:00
|
|
|
return;
|
|
|
|
}
|
2013-11-22 00:13:45 +01:00
|
|
|
}
|
2013-11-21 20:11:02 +01:00
|
|
|
bool prefix_match;
|
2015-11-18 03:29:40 +01:00
|
|
|
status_ = table_->GetOffset(&decoder_, target, prefix_slice, prefix_hash,
|
|
|
|
prefix_match, &next_offset_);
|
2013-12-20 18:35:24 +01:00
|
|
|
if (!status_.ok()) {
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2013-12-20 18:35:24 +01:00
|
|
|
return;
|
|
|
|
}
|
2013-11-21 20:11:02 +01:00
|
|
|
|
2015-09-17 01:57:43 +02:00
|
|
|
if (next_offset_ < table_->file_info_.data_end_offset) {
|
2013-11-21 20:11:02 +01:00
|
|
|
for (Next(); status_.ok() && Valid(); Next()) {
|
|
|
|
if (!prefix_match) {
|
|
|
|
// Need to verify the first key's prefix
|
2013-12-20 18:35:24 +01:00
|
|
|
if (table_->GetPrefix(key()) != prefix_slice) {
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2013-11-21 20:11:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
prefix_match = true;
|
|
|
|
}
|
2014-01-27 22:53:22 +01:00
|
|
|
if (table_->internal_comparator_.Compare(key(), target) >= 0) {
|
2013-11-21 20:11:02 +01:00
|
|
|
break;
|
|
|
|
}
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
2013-11-21 20:11:02 +01:00
|
|
|
} else {
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = table_->file_info_.data_end_offset;
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void PlainTableIterator::Next() {
|
|
|
|
offset_ = next_offset_;
|
2015-09-17 01:57:43 +02:00
|
|
|
if (offset_ < table_->file_info_.data_end_offset) {
|
2014-01-27 22:53:22 +01:00
|
|
|
Slice tmp_slice;
|
|
|
|
ParsedInternalKey parsed_key;
|
2014-06-19 01:36:48 +02:00
|
|
|
status_ =
|
|
|
|
table_->Next(&decoder_, &next_offset_, &parsed_key, &key_, &value_);
|
|
|
|
if (!status_.ok()) {
|
2015-09-17 01:57:43 +02:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-01-27 22:53:22 +01:00
|
|
|
}
|
|
|
|
}
|
2013-10-29 04:34:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Backward iteration is not supported by the plain table format.
void PlainTableIterator::Prev() {
  assert(false);
}
|
|
|
|
|
|
|
|
// Returns the current internal key. Must only be called while Valid().
Slice PlainTableIterator::key() const {
  assert(Valid());
  return key_;
}
|
|
|
|
|
|
|
|
// Returns the current value. Must only be called while Valid().
Slice PlainTableIterator::value() const {
  assert(Valid());
  return value_;
}
|
|
|
|
|
|
|
|
// Exposes the last error recorded by Seek()/Next(); OK when healthy.
Status PlainTableIterator::status() const {
  return status_;
}
|
|
|
|
|
|
|
|
} // namespace rocksdb
|
2014-04-15 22:39:26 +02:00
|
|
|
#endif // ROCKSDB_LITE
|