// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/partitioned_index_reader.h"

#include "file/random_access_file_reader.h"
#include "table/block_based/partitioned_index_iterator.h"

namespace ROCKSDB_NAMESPACE {
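// Creates a PartitionIndexReader for the table's partitioned (two-level)
// index. The top-level index block is read eagerly only when prefetching is
// requested or the block cache is bypassed; with a usable cache and no
// pinning, the reference is dropped again and the block is re-read from the
// cache on first use.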
Status PartitionIndexReader::Create(
    const BlockBasedTable* table, const ReadOptions& ro,
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<IndexReader>* index_reader) {
  assert(table != nullptr);
  assert(table->get_rep());
  assert(!pin || prefetch);
  assert(index_reader != nullptr);

  CachableEntry<Block> index_block;
  if (prefetch || !use_cache) {
    const Status s =
        ReadIndexBlock(table, prefetch_buffer, ro, use_cache,
                       /*get_context=*/nullptr, lookup_context, &index_block);
    if (!s.ok()) {
      return s;
    }

    if (use_cache && !pin) {
      index_block.Reset();
    }
  }

  index_reader->reset(new PartitionIndexReader(table, std::move(index_block)));

  return Status::OK();
}
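
// Returns an iterator over the partitioned index. When the partitions are
// pinned in partition_map_ (see CacheDependencies()), a two-level iterator
// over the pinned blocks is returned; otherwise a PartitionedIndexIterator
// that reads partition blocks on demand is used.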
InternalIteratorBase<IndexValue>* PartitionIndexReader::NewIterator(
    const ReadOptions& read_options, bool /* disable_prefix_seek */,
    IndexBlockIter* iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) {
  const bool no_io = (read_options.read_tier == kBlockCacheTier);
  CachableEntry<Block> index_block;
  const Status s =
      GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
  if (!s.ok()) {
    if (iter != nullptr) {
      iter->Invalidate(s);
      return iter;
    }

    return NewErrorInternalIterator<IndexValue>(s);
  }

  const BlockBasedTable::Rep* rep = table()->rep_;
  InternalIteratorBase<IndexValue>* it = nullptr;

  Statistics* kNullStats = nullptr;
  // Filters are already checked before seeking the index
  if (!partition_map_.empty()) {
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
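    // First level: the top-level index block; second level: the pinned
    // partition blocks held in partition_map_.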
    it = NewTwoLevelIterator(
        new BlockBasedTable::PartitionedIndexIteratorState(table(),
                                                           &partition_map_),
        index_block.GetValue()->NewIndexIterator(
            internal_comparator()->user_comparator(),
            rep->get_global_seqno(BlockType::kIndex), nullptr, kNullStats, true,
            index_has_first_key(), index_key_includes_seq(),
            index_value_is_full()));
  } else {
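    // Copy only the read options that are relevant for reading index
    // partitions into a fresh ReadOptions.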
    ReadOptions ro;
    ro.fill_cache = read_options.fill_cache;
    ro.deadline = read_options.deadline;
    ro.io_timeout = read_options.io_timeout;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter(
        index_block.GetValue()->NewIndexIterator(
            internal_comparator()->user_comparator(),
            rep->get_global_seqno(BlockType::kIndex), nullptr, kNullStats, true,
            index_has_first_key(), index_key_includes_seq(),
            index_value_is_full()));

    it = new PartitionedIndexIterator(
        table(), ro, *internal_comparator(), std::move(index_iter),
        lookup_context ? lookup_context->caller
                       : TableReaderCaller::kUncategorized);
  }

  assert(it != nullptr);
  index_block.TransferTo(it);

  return it;

  // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
  // on-stack BlockIter while the state is on heap. Currently it assumes
  // the first level iter is always on heap and will attempt to delete it
  // in its destructor.
}
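
// Prefetches the whole range of index partitions with a single read and, when
// `pin` is true, loads each partition block and keeps it in partition_map_
// (all partitions or none).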
Status PartitionIndexReader::CacheDependencies(const ReadOptions& ro,
                                               bool pin) {
  // Before reading partitions, prefetch them to avoid lots of IOs
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  const BlockBasedTable::Rep* rep = table()->rep_;
  IndexBlockIter biter;
  BlockHandle handle;
  Statistics* kNullStats = nullptr;

  CachableEntry<Block> index_block;
  {
    Status s = GetOrReadIndexBlock(false /* no_io */, nullptr /* get_context */,
                                   &lookup_context, &index_block);
    if (!s.ok()) {
      return s;
    }
  }

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  index_block.GetValue()->NewIndexIterator(
      internal_comparator()->user_comparator(),
      rep->get_global_seqno(BlockType::kIndex), &biter, kNullStats, true,
      index_has_first_key(), index_key_includes_seq(), index_value_is_full());
  // Index partitions are assumed to be consecutive. Prefetch them all.
  // Read the first block offset
  biter.SeekToFirst();
  if (!biter.Valid()) {
    // Empty index.
    return biter.status();
  }
  handle = biter.value().handle;
  uint64_t prefetch_off = handle.offset();

  // Read the last block's offset
  biter.SeekToLast();
  if (!biter.Valid()) {
    // Empty index.
    return biter.status();
  }
  handle = biter.value().handle;
  uint64_t last_off = handle.offset() + block_size(handle);
  uint64_t prefetch_len = last_off - prefetch_off;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
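  // A dedicated prefetch buffer is used for one explicit read of the whole
  // partition range; implicit auto readahead is disabled.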
  rep->CreateFilePrefetchBuffer(0, 0, &prefetch_buffer,
                                false /*Implicit auto readahead*/);
  IOOptions opts;
  {
    Status s = rep->file->PrepareIOOptions(ro, opts);
    if (s.ok()) {
      s = prefetch_buffer->Prefetch(opts, rep->file.get(), prefetch_off,
                                    static_cast<size_t>(prefetch_len));
    }
    if (!s.ok()) {
      return s;
    }
  }

  // For saving "all or nothing" to partition_map_
  std::unordered_map<uint64_t, CachableEntry<Block>> map_in_progress;

  // After prefetch, read the partitions one by one
  biter.SeekToFirst();
  size_t partition_count = 0;
  for (; biter.Valid(); biter.Next()) {
    handle = biter.value().handle;
    CachableEntry<Block> block;
    ++partition_count;
    // TODO: Support counter batch update for partitioned index and
    // filter blocks
    Status s = table()->MaybeReadBlockAndLoadToCache(
        prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
        /*wait=*/true, &block, BlockType::kIndex, /*get_context=*/nullptr,
        &lookup_context, /*contents=*/nullptr);

    if (!s.ok()) {
      return s;
    }
    if (block.GetValue() != nullptr) {
      // Might need to "pin" some mmap-read blocks (GetOwnValue) if some
      // partitions are successfully compressed (cached) and some are not
      // compressed (mmap eligible)
      if (block.IsCached() || block.GetOwnValue()) {
        if (pin) {
          map_in_progress[handle.offset()] = std::move(block);
        }
      }
    }
  }
  Status s = biter.status();
  // Save (pin) them only if everything checks out
  if (map_in_progress.size() == partition_count && s.ok()) {
    std::swap(partition_map_, map_in_progress);
  }
  return s;
}

}  // namespace ROCKSDB_NAMESPACE