// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based/block_based_table_factory.h"

#include <stdint.h>

#include <cinttypes>
#include <memory>
#include <string>

#include "options/options_helper.h"
#include "options/options_parser.h"
#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/flush_block_policy.h"
#include "table/block_based/block_based_table_builder.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/format.h"
#include "util/mutexlock.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

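// TailPrefetchStats keeps the effective sizes of recent tail prefetches in a
// small fixed-size circular buffer (kNumTracked slots) so that a prefetch
// size can later be suggested from this history.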
void TailPrefetchStats::RecordEffectiveSize(size_t len) {
  MutexLock l(&mutex_);
  if (num_records_ < kNumTracked) {
    num_records_++;
  }
  records_[next_++] = len;
  if (next_ == kNumTracked) {
    next_ = 0;
  }
}

size_t TailPrefetchStats::GetSuggestedPrefetchSize() {
  std::vector<size_t> sorted;
  {
    MutexLock l(&mutex_);

    if (num_records_ == 0) {
      return 0;
    }
    sorted.assign(records_, records_ + num_records_);
  }

  // Of the historic sizes, we find the maximum one that satisfies the
  // condition that, if we always prefetch that much, less than 1/8 of what
  // we read is wasted.
  std::sort(sorted.begin(), sorted.end());

  // Assuming we have 5 data points, and after sorting it looks like this:
  //
// +---+
// +---+ | |
// | | | |
// | | | |
// | | | |
// | | | |
// +---+ | | | |
// | | | | | |
// +---+ | | | | | |
// | | | | | | | |
// +---+ | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// +---+ +---+ +---+ +---+ +---+
  //
  // and we use each of the values as a candidate, and estimate how much we
  // would waste compared to what we read. For example, when we use the 3rd
  // record as the candidate, this area is what we read:
// +---+
// +---+ | |
// | | | |
// | | | |
// | | | |
// | | | |
// *** *** *** ***+ *** *** *** *** **
// * | | | | | |
// +---+ | | | | | *
// * | | | | | | | |
// +---+ | | | | | | | *
// * | | | | X | | | | |
// | | | | | | | | | *
// * | | | | | | | | |
// | | | | | | | | | *
// * | | | | | | | | |
// *** *** ***-*** ***--*** ***--*** +****
  // which is (size of the record) X (number of records).
  //
  // While wasted is this area:
// +---+
// +---+ | |
// | | | |
// | | | |
// | | | |
// | | | |
// *** *** *** ****---+ | | | |
// * * | | | | |
// * *-*** *** | | | | |
// * * | | | | | | |
// *--** *** | | | | | | |
// | | | | | X | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// +---+ +---+ +---+ +---+ +---+
  //
  // Which can be calculated iteratively.
  // The difference in wasted bytes between using the 4th and the 3rd record
  // will be the following area:
// +---+
// +--+ +-+ ++ +-+ +-+ +---+ | |
// + xxxxxxxxxxxxxxxxxxxxxxxx | | | |
// xxxxxxxxxxxxxxxxxxxxxxxx | | | |
// + xxxxxxxxxxxxxxxxxxxxxxxx | | | |
// | xxxxxxxxxxxxxxxxxxxxxxxx | | | |
// +-+ +-+ +-+ ++ +---+ +--+ | | |
// | | | | | | |
// +---+ ++ | | | | | |
// | | | | | | X | | |
// +---+ ++ | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// | | | | | | | | | |
// +---+ +---+ +---+ +---+ +---+
  //
  // which is the size difference between the 4th and the 3rd record,
  // times 3, which is the number of records before the 4th.
  // Here we assume that all data within the prefetch range will be useful. In
  // reality, it may not be the case when a partial block is inside the range,
  // or there is data in the middle that is not read. We ignore those cases
  // for simplicity.
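  //
  // For a concrete (made-up) example, take sorted sizes {2, 4, 4, 8, 16}:
  // picking 4 reads 4 * 5 = 20 bytes and wastes (4 - 2) * 1 = 2 bytes, which
  // is within 20 / 8, so 4 qualifies; picking 8 reads 8 * 5 = 40 bytes but
  // wastes 2 + (8 - 4) * 3 = 14 bytes, more than 40 / 8, so the suggestion
  // stays at 4.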
  assert(!sorted.empty());
  size_t prev_size = sorted[0];
  size_t max_qualified_size = sorted[0];
  size_t wasted = 0;
  for (size_t i = 1; i < sorted.size(); i++) {
    size_t read = sorted[i] * sorted.size();
    wasted += (sorted[i] - prev_size) * i;
    if (wasted <= read / 8) {
      max_qualified_size = sorted[i];
    }
    prev_size = sorted[i];
  }
  const size_t kMaxPrefetchSize = 512 * 1024;  // Never exceed 512KB
  return std::min(kMaxPrefetchSize, max_qualified_size);
}

#ifndef ROCKSDB_LITE
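// Maps each BlockBasedTableOptions field name to the metadata the options
// parser uses for it: the member offset, option type, verification mode,
// flags, and (for a few entries) custom parse/serialize/compare functions.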
static std::unordered_map<std::string, OptionTypeInfo>
    block_based_table_type_info = {
        /* currently not supported
          std::shared_ptr<Cache> block_cache = nullptr;
          std::shared_ptr<Cache> block_cache_compressed = nullptr;
         */
        {"flush_block_policy_factory",
         {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
          OptionType::kFlushBlockPolicyFactory, OptionVerificationType::kByName,
          OptionTypeFlags::kCompareNever, 0}},
        {"cache_index_and_filter_blocks",
         {offsetof(struct BlockBasedTableOptions,
                   cache_index_and_filter_blocks),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"cache_index_and_filter_blocks_with_high_priority",
         {offsetof(struct BlockBasedTableOptions,
                   cache_index_and_filter_blocks_with_high_priority),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"pin_l0_filter_and_index_blocks_in_cache",
         {offsetof(struct BlockBasedTableOptions,
                   pin_l0_filter_and_index_blocks_in_cache),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"index_type",
         {offsetof(struct BlockBasedTableOptions, index_type),
          OptionType::kBlockBasedTableIndexType,
          OptionVerificationType::kNormal, OptionTypeFlags::kNone, 0}},
        {"hash_index_allow_collision",
         {offsetof(struct BlockBasedTableOptions, hash_index_allow_collision),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"data_block_index_type",
         {offsetof(struct BlockBasedTableOptions, data_block_index_type),
          OptionType::kBlockBasedTableDataBlockIndexType,
          OptionVerificationType::kNormal, OptionTypeFlags::kNone, 0}},
        {"index_shortening",
         {offsetof(struct BlockBasedTableOptions, index_shortening),
          OptionType::kBlockBasedTableIndexShorteningMode,
          OptionVerificationType::kNormal, OptionTypeFlags::kNone, 0}},
        {"data_block_hash_table_util_ratio",
         {offsetof(struct BlockBasedTableOptions,
                   data_block_hash_table_util_ratio),
          OptionType::kDouble, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"checksum",
         {offsetof(struct BlockBasedTableOptions, checksum),
          OptionType::kChecksumType, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"no_block_cache",
         {offsetof(struct BlockBasedTableOptions, no_block_cache),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"block_size",
         {offsetof(struct BlockBasedTableOptions, block_size),
          OptionType::kSizeT, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"block_size_deviation",
         {offsetof(struct BlockBasedTableOptions, block_size_deviation),
          OptionType::kInt, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"block_restart_interval",
         {offsetof(struct BlockBasedTableOptions, block_restart_interval),
          OptionType::kInt, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"index_block_restart_interval",
         {offsetof(struct BlockBasedTableOptions, index_block_restart_interval),
          OptionType::kInt, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"index_per_partition",
         {0, OptionType::kUInt64T, OptionVerificationType::kDeprecated,
          OptionTypeFlags::kNone, 0}},
        {"metadata_block_size",
         {offsetof(struct BlockBasedTableOptions, metadata_block_size),
          OptionType::kUInt64T, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"partition_filters",
         {offsetof(struct BlockBasedTableOptions, partition_filters),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"filter_policy",
         {offsetof(struct BlockBasedTableOptions, filter_policy),
          OptionType::kUnknown, OptionVerificationType::kByNameAllowFromNull,
          OptionTypeFlags::kNone, 0,
          // Parses the Filter policy
          [](const ConfigOptions& opts, const std::string&,
             const std::string& value, char* addr) {
            auto* policy =
                reinterpret_cast<std::shared_ptr<const FilterPolicy>*>(addr);
            return FilterPolicy::CreateFromString(opts, value, policy);
          },
          // Converts the FilterPolicy to its string representation
          [](const ConfigOptions&, const std::string&, const char* addr,
             std::string* value) {
            const auto* policy =
                reinterpret_cast<const std::shared_ptr<const FilterPolicy>*>(
                    addr);
            if (policy->get()) {
              *value = (*policy)->Name();
            } else {
              *value = kNullptrString;
            }
            return Status::OK();
          },
          // Compares two FilterPolicy objects for equality
          [](const ConfigOptions&, const std::string&, const char* addr1,
             const char* addr2, std::string*) {
            const auto* policy1 =
                reinterpret_cast<const std::shared_ptr<const FilterPolicy>*>(
                    addr1)
                    ->get();
            const auto* policy2 =
                reinterpret_cast<const std::shared_ptr<FilterPolicy>*>(addr2)
                    ->get();
            if (policy1 == policy2) {
              return true;
            } else if (policy1 != nullptr && policy2 != nullptr) {
              return (strcmp(policy1->Name(), policy2->Name()) == 0);
            } else {
              return false;
            }
          }}},
        {"whole_key_filtering",
         {offsetof(struct BlockBasedTableOptions, whole_key_filtering),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"skip_table_builder_flush",
         {0, OptionType::kBoolean, OptionVerificationType::kDeprecated,
          OptionTypeFlags::kNone, 0}},
        {"format_version",
         {offsetof(struct BlockBasedTableOptions, format_version),
          OptionType::kUInt32T, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"verify_compression",
         {offsetof(struct BlockBasedTableOptions, verify_compression),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"read_amp_bytes_per_bit",
         {offsetof(struct BlockBasedTableOptions, read_amp_bytes_per_bit),
          OptionType::kSizeT, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"enable_index_compression",
         {offsetof(struct BlockBasedTableOptions, enable_index_compression),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"block_align",
         {offsetof(struct BlockBasedTableOptions, block_align),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"pin_top_level_index_and_filter",
         {offsetof(struct BlockBasedTableOptions,
                   pin_top_level_index_and_filter),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone, 0}},
        {"block_cache",
         {offsetof(struct BlockBasedTableOptions, block_cache),
          OptionType::kUnknown, OptionVerificationType::kNormal,
          (OptionTypeFlags::kCompareNever | OptionTypeFlags::kDontSerialize), 0,
          // Parses the input value as a Cache
          [](const ConfigOptions& opts, const std::string&,
             const std::string& value, char* addr) {
            auto* cache = reinterpret_cast<std::shared_ptr<Cache>*>(addr);
            return Cache::CreateFromString(opts, value, cache);
          }}},
        {"block_cache_compressed",
         {offsetof(struct BlockBasedTableOptions, block_cache_compressed),
          OptionType::kUnknown, OptionVerificationType::kNormal,
          (OptionTypeFlags::kCompareNever | OptionTypeFlags::kDontSerialize), 0,
          // Parses the input value as a Cache
          [](const ConfigOptions& opts, const std::string&,
             const std::string& value, char* addr) {
            auto* cache = reinterpret_cast<std::shared_ptr<Cache>*>(addr);
            return Cache::CreateFromString(opts, value, cache);
          }}},
};
#endif  // ROCKSDB_LITE

// TODO(myabandeh): We should return an error instead of silently changing the
// options
BlockBasedTableFactory::BlockBasedTableFactory(
    const BlockBasedTableOptions& _table_options)
    : table_options_(_table_options) {
  if (table_options_.flush_block_policy_factory == nullptr) {
    table_options_.flush_block_policy_factory.reset(
        new FlushBlockBySizePolicyFactory());
  }
  if (table_options_.no_block_cache) {
    table_options_.block_cache.reset();
  } else if (table_options_.block_cache == nullptr) {
    LRUCacheOptions co;
    co.capacity = 8 << 20;
    // It makes little sense to pay overhead for mid-point insertion while the
    // block cache is only 8MB.
    co.high_pri_pool_ratio = 0.0;
    table_options_.block_cache = NewLRUCache(co);
  }
  if (table_options_.block_size_deviation < 0 ||
      table_options_.block_size_deviation > 100) {
    table_options_.block_size_deviation = 0;
  }
  if (table_options_.block_restart_interval < 1) {
    table_options_.block_restart_interval = 1;
  }
  if (table_options_.index_block_restart_interval < 1) {
    table_options_.index_block_restart_interval = 1;
  }
  if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
      table_options_.index_block_restart_interval != 1) {
    // Currently kHashSearch is incompatible with
    // index_block_restart_interval > 1
    table_options_.index_block_restart_interval = 1;
  }
  if (table_options_.partition_filters &&
      table_options_.index_type !=
          BlockBasedTableOptions::kTwoLevelIndexSearch) {
    // We do not support partitioned filters without partitioning indexes
    table_options_.partition_filters = false;
  }
}

Status BlockBasedTableFactory::NewTableReader(
    const TableReaderOptions& table_reader_options,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    bool prefetch_index_and_filter_in_cache) const {
  return BlockBasedTable::Open(
      table_reader_options.ioptions, table_reader_options.env_options,
      table_options_, table_reader_options.internal_comparator, std::move(file),
      file_size, table_reader, table_reader_options.prefix_extractor,
      prefetch_index_and_filter_in_cache, table_reader_options.skip_filters,
      table_reader_options.level, table_reader_options.immortal,
      table_reader_options.largest_seqno, &tail_prefetch_stats_,
      table_reader_options.block_cache_tracer);
}

TableBuilder* BlockBasedTableFactory::NewTableBuilder(
    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
    WritableFileWriter* file) const {
  auto table_builder = new BlockBasedTableBuilder(
      table_builder_options.ioptions, table_builder_options.moptions,
      table_options_, table_builder_options.internal_comparator,
      table_builder_options.int_tbl_prop_collector_factories, column_family_id,
      file, table_builder_options.compression_type,
      table_builder_options.sample_for_compression,
      table_builder_options.compression_opts,
      table_builder_options.skip_filters,
      table_builder_options.column_family_name, table_builder_options.level,
      table_builder_options.creation_time,
      table_builder_options.oldest_key_time,
      table_builder_options.target_file_size,
      table_builder_options.file_creation_time);

  return table_builder;
}

Status BlockBasedTableFactory::SanitizeOptions(
    const DBOptions& db_opts, const ColumnFamilyOptions& cf_opts) const {
  if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
      cf_opts.prefix_extractor == nullptr) {
    return Status::InvalidArgument(
        "Hash index is specified for block-based "
        "table, but prefix_extractor is not given");
  }
  if (table_options_.cache_index_and_filter_blocks &&
      table_options_.no_block_cache) {
    return Status::InvalidArgument(
        "Enable cache_index_and_filter_blocks, "
        ", but block cache is disabled");
  }
  if (table_options_.pin_l0_filter_and_index_blocks_in_cache &&
      table_options_.no_block_cache) {
    return Status::InvalidArgument(
        "Enable pin_l0_filter_and_index_blocks_in_cache, "
        ", but block cache is disabled");
  }
  if (!BlockBasedTableSupportedVersion(table_options_.format_version)) {
    return Status::InvalidArgument(
        "Unsupported BlockBasedTable format_version. Please check "
        "include/rocksdb/table.h for more info");
  }
  if (table_options_.block_align && (cf_opts.compression != kNoCompression)) {
    return Status::InvalidArgument(
        "Enable block_align, but compression "
        "enabled");
  }
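  // A power-of-two block size has exactly one bit set, so
  // (block_size & (block_size - 1)) is non-zero iff the size is not a power
  // of two.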
  if (table_options_.block_align &&
      (table_options_.block_size & (table_options_.block_size - 1))) {
    return Status::InvalidArgument(
        "Block alignment requested but block size is not a power of 2");
  }
  if (table_options_.block_size > port::kMaxUint32) {
    return Status::InvalidArgument(
        "block size exceeds maximum number (4GiB) allowed");
  }
  if (table_options_.data_block_index_type ==
          BlockBasedTableOptions::kDataBlockBinaryAndHash &&
      table_options_.data_block_hash_table_util_ratio <= 0) {
    return Status::InvalidArgument(
        "data_block_hash_table_util_ratio should be greater than 0 when "
        "data_block_index_type is set to kDataBlockBinaryAndHash");
  }
  if (db_opts.unordered_write && cf_opts.max_successive_merges > 0) {
    // TODO(myabandeh): support it
    return Status::InvalidArgument(
        "max_successive_merges larger than 0 is currently inconsistent with "
        "unordered_write");
  }
  return Status::OK();
}

std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
  std::string ret;
  ret.reserve(20000);
  const int kBufferSize = 200;
  char buffer[kBufferSize];

  snprintf(buffer, kBufferSize, "  flush_block_policy_factory: %s (%p)\n",
           table_options_.flush_block_policy_factory->Name(),
           static_cast<void*>(table_options_.flush_block_policy_factory.get()));
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  cache_index_and_filter_blocks: %d\n",
           table_options_.cache_index_and_filter_blocks);
  ret.append(buffer);
  snprintf(buffer, kBufferSize,
           "  cache_index_and_filter_blocks_with_high_priority: %d\n",
           table_options_.cache_index_and_filter_blocks_with_high_priority);
  ret.append(buffer);
  snprintf(buffer, kBufferSize,
           "  pin_l0_filter_and_index_blocks_in_cache: %d\n",
           table_options_.pin_l0_filter_and_index_blocks_in_cache);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  pin_top_level_index_and_filter: %d\n",
           table_options_.pin_top_level_index_and_filter);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  index_type: %d\n",
           table_options_.index_type);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  data_block_index_type: %d\n",
           table_options_.data_block_index_type);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  index_shortening: %d\n",
           static_cast<int>(table_options_.index_shortening));
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  data_block_hash_table_util_ratio: %lf\n",
           table_options_.data_block_hash_table_util_ratio);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  hash_index_allow_collision: %d\n",
           table_options_.hash_index_allow_collision);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  checksum: %d\n", table_options_.checksum);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  no_block_cache: %d\n",
           table_options_.no_block_cache);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  block_cache: %p\n",
           static_cast<void*>(table_options_.block_cache.get()));
  ret.append(buffer);
  if (table_options_.block_cache) {
    const char* block_cache_name = table_options_.block_cache->Name();
    if (block_cache_name != nullptr) {
      snprintf(buffer, kBufferSize, "  block_cache_name: %s\n",
               block_cache_name);
      ret.append(buffer);
    }
    ret.append("  block_cache_options:\n");
    ret.append(table_options_.block_cache->GetPrintableOptions());
  }
  snprintf(buffer, kBufferSize, "  block_cache_compressed: %p\n",
           static_cast<void*>(table_options_.block_cache_compressed.get()));
  ret.append(buffer);
  if (table_options_.block_cache_compressed) {
    const char* block_cache_compressed_name =
        table_options_.block_cache_compressed->Name();
    if (block_cache_compressed_name != nullptr) {
      snprintf(buffer, kBufferSize, "  block_cache_name: %s\n",
               block_cache_compressed_name);
      ret.append(buffer);
    }
    ret.append("  block_cache_compressed_options:\n");
    ret.append(table_options_.block_cache_compressed->GetPrintableOptions());
  }
  snprintf(buffer, kBufferSize, "  persistent_cache: %p\n",
           static_cast<void*>(table_options_.persistent_cache.get()));
  ret.append(buffer);
  if (table_options_.persistent_cache) {
    snprintf(buffer, kBufferSize, "  persistent_cache_options:\n");
    ret.append(buffer);
    ret.append(table_options_.persistent_cache->GetPrintableOptions());
  }
  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
           table_options_.block_size);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",
           table_options_.block_size_deviation);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  block_restart_interval: %d\n",
           table_options_.block_restart_interval);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  index_block_restart_interval: %d\n",
           table_options_.index_block_restart_interval);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  metadata_block_size: %" PRIu64 "\n",
           table_options_.metadata_block_size);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  partition_filters: %d\n",
           table_options_.partition_filters);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  use_delta_encoding: %d\n",
           table_options_.use_delta_encoding);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  filter_policy: %s\n",
           table_options_.filter_policy == nullptr
               ? "nullptr"
               : table_options_.filter_policy->Name());
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  whole_key_filtering: %d\n",
           table_options_.whole_key_filtering);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  verify_compression: %d\n",
           table_options_.verify_compression);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  read_amp_bytes_per_bit: %d\n",
           table_options_.read_amp_bytes_per_bit);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  format_version: %d\n",
           table_options_.format_version);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  enable_index_compression: %d\n",
           table_options_.enable_index_compression);
  ret.append(buffer);
  snprintf(buffer, kBufferSize, "  block_align: %d\n",
           table_options_.block_align);
  ret.append(buffer);
  return ret;
}

#ifndef ROCKSDB_LITE
Status BlockBasedTableFactory::GetOptionString(
    const ConfigOptions& config_options, std::string* opt_string) const {
  assert(opt_string);
  opt_string->clear();
  return GetStringFromStruct(config_options, &table_options_,
                             block_based_table_type_info, opt_string);
}
#else
Status BlockBasedTableFactory::GetOptionString(
    const ConfigOptions& /*opts*/, std::string* /*opt_string*/) const {
  return Status::OK();
}
#endif  // !ROCKSDB_LITE

const BlockBasedTableOptions& BlockBasedTableFactory::table_options() const {
  return table_options_;
}

#ifndef ROCKSDB_LITE
namespace {
std::string ParseBlockBasedTableOption(const ConfigOptions& config_options,
                                       const std::string& name,
                                       const std::string& org_value,
                                       BlockBasedTableOptions* new_options) {
  const std::string& value = config_options.input_strings_escaped
                                 ? UnescapeOptionString(org_value)
                                 : org_value;
  const auto iter = block_based_table_type_info.find(name);
  if (iter == block_based_table_type_info.end()) {
    if (config_options.ignore_unknown_options) {
      return "";
    } else {
      return "Unrecognized option";
    }
  }
  const auto& opt_info = iter->second;
  Status s = opt_info.ParseOption(
      config_options, iter->first, value,
      reinterpret_cast<char*>(new_options) + opt_info.offset);
  if (s.ok()) {
    return "";
  } else {
    return s.ToString();
  }
}
}  // namespace
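
// For reference, the opts_str accepted below is a semicolon-separated list of
// name=value pairs, e.g. (illustrative values only):
//   "block_size=4096;cache_index_and_filter_blocks=true;no_block_cache=false"
// StringToMap() turns it into the map consumed by
// GetBlockBasedTableOptionsFromMap().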
Status GetBlockBasedTableOptionsFromString(
    const BlockBasedTableOptions& table_options, const std::string& opts_str,
    BlockBasedTableOptions* new_table_options) {
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  config_options.ignore_unknown_options = false;
  return GetBlockBasedTableOptionsFromString(config_options, table_options,
                                             opts_str, new_table_options);
}

Status GetBlockBasedTableOptionsFromString(
    const ConfigOptions& config_options,
    const BlockBasedTableOptions& table_options, const std::string& opts_str,
    BlockBasedTableOptions* new_table_options) {
  std::unordered_map<std::string, std::string> opts_map;
  Status s = StringToMap(opts_str, &opts_map);
  if (!s.ok()) {
    return s;
  }
  return GetBlockBasedTableOptionsFromMap(config_options, table_options,
                                          opts_map, new_table_options);
}

Status GetBlockBasedTableOptionsFromMap(
    const BlockBasedTableOptions& table_options,
    const std::unordered_map<std::string, std::string>& opts_map,
    BlockBasedTableOptions* new_table_options, bool input_strings_escaped,
    bool ignore_unknown_options) {
  ConfigOptions config_options;
  config_options.input_strings_escaped = input_strings_escaped;
  config_options.ignore_unknown_options = ignore_unknown_options;

  return GetBlockBasedTableOptionsFromMap(config_options, table_options,
                                          opts_map, new_table_options);
}

Status GetBlockBasedTableOptionsFromMap(
    const ConfigOptions& config_options,
    const BlockBasedTableOptions& table_options,
    const std::unordered_map<std::string, std::string>& opts_map,
    BlockBasedTableOptions* new_table_options) {
  assert(new_table_options);
  *new_table_options = table_options;
  for (const auto& o : opts_map) {
    auto error_message = ParseBlockBasedTableOption(
        config_options, o.first, o.second, new_table_options);
    if (error_message != "") {
      const auto iter = block_based_table_type_info.find(o.first);
      if (iter == block_based_table_type_info.end() ||
          !config_options
               .input_strings_escaped ||  // !input_strings_escaped indicates
                                          // the old API, where everything is
                                          // parsable.
          (!iter->second.IsByName() && !iter->second.IsDeprecated())) {
        // Restore "new_options" to the default "base_options".
        *new_table_options = table_options;
        return Status::InvalidArgument("Can't parse BlockBasedTableOptions:",
                                       o.first + " " + error_message);
      }
    }
  }
  return Status::OK();
}
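
// Compares the BlockBasedTableOptions of an in-use factory ("base") against
// one reconstructed from an options file, field by field, honoring each
// field's sanity-check level from block_based_table_type_info.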
Status VerifyBlockBasedTableFactory(const ConfigOptions& config_options,
                                    const BlockBasedTableFactory* base_tf,
                                    const BlockBasedTableFactory* file_tf) {
  if ((base_tf != nullptr) != (file_tf != nullptr) &&
      config_options.sanity_level > ConfigOptions::kSanityLevelNone) {
    return Status::Corruption(
        "[RocksDBOptionsParser]: Inconsistent TableFactory class type");
  }
  if (base_tf == nullptr) {
    return Status::OK();
  }
  assert(file_tf != nullptr);

  const auto& base_opt = base_tf->table_options();
  const auto& file_opt = file_tf->table_options();

  std::string mismatch;
  for (auto& pair : block_based_table_type_info) {
    // We skip checking deprecated variables as they might
    // contain random values since they might not be initialized
    if (config_options.IsCheckEnabled(pair.second.GetSanityLevel())) {
      const char* base_addr =
          reinterpret_cast<const char*>(&base_opt) + pair.second.offset;
      const char* file_addr =
          reinterpret_cast<const char*>(&file_opt) + pair.second.offset;

      if (!pair.second.MatchesOption(config_options, pair.first, base_addr,
                                     file_addr, &mismatch) &&
          !pair.second.MatchesByName(config_options, pair.first, base_addr,
                                     file_addr)) {
        return Status::Corruption(
            "[RocksDBOptionsParser]: "
            "failed the verification on BlockBasedTableOptions::",
            pair.first);
      }
    }
  }
  return Status::OK();
}
#endif  // !ROCKSDB_LITE

TableFactory* NewBlockBasedTableFactory(
    const BlockBasedTableOptions& _table_options) {
  return new BlockBasedTableFactory(_table_options);
}

const std::string BlockBasedTableFactory::kName = "BlockBasedTable";
const std::string BlockBasedTablePropertyNames::kIndexType =
    "rocksdb.block.based.table.index.type";
const std::string BlockBasedTablePropertyNames::kWholeKeyFiltering =
    "rocksdb.block.based.table.whole.key.filtering";
const std::string BlockBasedTablePropertyNames::kPrefixFiltering =
    "rocksdb.block.based.table.prefix.filtering";
const std::string kHashIndexPrefixesBlock = "rocksdb.hashindex.prefixes";
const std::string kHashIndexPrefixesMetadataBlock =
    "rocksdb.hashindex.metadata";
const std::string kPropTrue = "1";
const std::string kPropFalse = "0";

}  // namespace ROCKSDB_NAMESPACE