// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "rocksdb/table_properties.h"

#include "port/port.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "table/block_based/block.h"
#include "table/internal_iterator.h"
#include "table/table_properties_internal.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
    port::kMaxInt32;

namespace {
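  // Appends one "key<kv_delim>value<prop_delim>" entry to props.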
  void AppendProperty(
      std::string& props,
      const std::string& key,
      const std::string& value,
      const std::string& prop_delim,
      const std::string& kv_delim) {
    props.append(key);
    props.append(kv_delim);
    props.append(value);
    props.append(prop_delim);
  }
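
  // Overload for non-string values: converts the value with ToString() and
  // forwards to the string overload above.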
  template <class TValue>
  void AppendProperty(
      std::string& props,
      const std::string& key,
      const TValue& value,
      const std::string& prop_delim,
      const std::string& kv_delim) {
    AppendProperty(
        props, key, ToString(value), prop_delim, kv_delim
    );
  }

  // Seek to the specified meta block.
  // Sets *is_found to true if it successfully seeks to that block.
  Status SeekToMetaBlock(InternalIterator* meta_iter,
                         const std::string& block_name, bool* is_found,
                         BlockHandle* block_handle = nullptr) {
    if (block_handle != nullptr) {
      *block_handle = BlockHandle::NullBlockHandle();
    }
    *is_found = true;
    meta_iter->Seek(block_name);
    if (meta_iter->status().ok()) {
      if (meta_iter->Valid() && meta_iter->key() == block_name) {
        *is_found = true;
        if (block_handle) {
          Slice v = meta_iter->value();
          return block_handle->DecodeFrom(&v);
        }
      } else {
        *is_found = false;
        return Status::OK();
      }
    }
    return meta_iter->status();
  }
}  // namespace
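
// Assemble a human-readable listing of these properties, with entries
// separated by prop_delim and keys separated from values by kv_delim.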
std::string TableProperties::ToString(
    const std::string& prop_delim,
    const std::string& kv_delim) const {
  std::string result;
  result.reserve(1024);

  // Basic Info
  AppendProperty(result, "# data blocks", num_data_blocks, prop_delim,
                 kv_delim);
  AppendProperty(result, "# entries", num_entries, prop_delim, kv_delim);
  AppendProperty(result, "# deletions", num_deletions, prop_delim, kv_delim);
  AppendProperty(result, "# merge operands", num_merge_operands, prop_delim,
                 kv_delim);
  AppendProperty(result, "# range deletions", num_range_deletions, prop_delim,
                 kv_delim);

  AppendProperty(result, "raw key size", raw_key_size, prop_delim, kv_delim);
  AppendProperty(result, "raw average key size",
                 num_entries != 0 ? 1.0 * raw_key_size / num_entries : 0.0,
                 prop_delim, kv_delim);
  AppendProperty(result, "raw value size", raw_value_size, prop_delim,
                 kv_delim);
  AppendProperty(result, "raw average value size",
                 num_entries != 0 ? 1.0 * raw_value_size / num_entries : 0.0,
                 prop_delim, kv_delim);

  AppendProperty(result, "data block size", data_size, prop_delim, kv_delim);
  char index_block_size_str[80];
  snprintf(index_block_size_str, sizeof(index_block_size_str),
           "index block size (user-key? %d, delta-value? %d)",
           static_cast<int>(index_key_is_user_key),
           static_cast<int>(index_value_is_delta_encoded));
  AppendProperty(result, index_block_size_str, index_size, prop_delim,
                 kv_delim);
  if (index_partitions != 0) {
    AppendProperty(result, "# index partitions", index_partitions, prop_delim,
                   kv_delim);
    AppendProperty(result, "top-level index size", top_level_index_size,
                   prop_delim, kv_delim);
  }
  AppendProperty(result, "filter block size", filter_size, prop_delim,
                 kv_delim);
  AppendProperty(result, "(estimated) table size",
                 data_size + index_size + filter_size, prop_delim, kv_delim);

  AppendProperty(
      result, "filter policy name",
      filter_policy_name.empty() ? std::string("N/A") : filter_policy_name,
      prop_delim, kv_delim);

  AppendProperty(result, "prefix extractor name",
                 prefix_extractor_name.empty() ? std::string("N/A")
                                               : prefix_extractor_name,
                 prop_delim, kv_delim);

  AppendProperty(result, "column family ID",
                 column_family_id ==
                         ROCKSDB_NAMESPACE::TablePropertiesCollectorFactory::
                             Context::kUnknownColumnFamily
                     ? std::string("N/A")
                     : ROCKSDB_NAMESPACE::ToString(column_family_id),
                 prop_delim, kv_delim);
  AppendProperty(
      result, "column family name",
      column_family_name.empty() ? std::string("N/A") : column_family_name,
      prop_delim, kv_delim);

  AppendProperty(result, "comparator name",
                 comparator_name.empty() ? std::string("N/A") : comparator_name,
                 prop_delim, kv_delim);

  AppendProperty(
      result, "merge operator name",
      merge_operator_name.empty() ? std::string("N/A") : merge_operator_name,
      prop_delim, kv_delim);

  AppendProperty(result, "property collectors names",
                 property_collectors_names.empty() ? std::string("N/A")
                                                   : property_collectors_names,
                 prop_delim, kv_delim);

  AppendProperty(
      result, "SST file compression algo",
      compression_name.empty() ? std::string("N/A") : compression_name,
      prop_delim, kv_delim);

  AppendProperty(
      result, "SST file compression options",
      compression_options.empty() ? std::string("N/A") : compression_options,
      prop_delim, kv_delim);

  AppendProperty(result, "creation time", creation_time, prop_delim, kv_delim);

  AppendProperty(result, "time stamp of earliest key", oldest_key_time,
                 prop_delim, kv_delim);

  AppendProperty(result, "file creation time", file_creation_time, prop_delim,
                 kv_delim);

  AppendProperty(result, "slow compression estimated data size",
                 slow_compression_estimated_data_size, prop_delim, kv_delim);
  AppendProperty(result, "fast compression estimated data size",
                 fast_compression_estimated_data_size, prop_delim, kv_delim);

  // DB identity and DB session ID
  AppendProperty(result, "DB identity", db_id, prop_delim, kv_delim);
  AppendProperty(result, "DB session identity", db_session_id, prop_delim,
                 kv_delim);

  return result;
}
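
// Accumulate the numeric (aggregatable) properties of tp into this object.
// String-valued properties such as names are not combined here.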
void TableProperties::Add(const TableProperties& tp) {
  data_size += tp.data_size;
  index_size += tp.index_size;
  index_partitions += tp.index_partitions;
  top_level_index_size += tp.top_level_index_size;
  index_key_is_user_key += tp.index_key_is_user_key;
  index_value_is_delta_encoded += tp.index_value_is_delta_encoded;
  filter_size += tp.filter_size;
  raw_key_size += tp.raw_key_size;
  raw_value_size += tp.raw_value_size;
  num_data_blocks += tp.num_data_blocks;
  num_entries += tp.num_entries;
  num_deletions += tp.num_deletions;
  num_merge_operands += tp.num_merge_operands;
  num_range_deletions += tp.num_range_deletions;
  slow_compression_estimated_data_size +=
      tp.slow_compression_estimated_data_size;
  fast_compression_estimated_data_size +=
      tp.fast_compression_estimated_data_size;
}
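
// Expose the same aggregatable properties as a name -> value map, e.g. so
// callers can sum them across files.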
std::map<std::string, uint64_t>
TableProperties::GetAggregatablePropertiesAsMap() const {
  std::map<std::string, uint64_t> rv;
  rv["data_size"] = data_size;
  rv["index_size"] = index_size;
  rv["index_partitions"] = index_partitions;
  rv["top_level_index_size"] = top_level_index_size;
  rv["filter_size"] = filter_size;
  rv["raw_key_size"] = raw_key_size;
  rv["raw_value_size"] = raw_value_size;
  rv["num_data_blocks"] = num_data_blocks;
  rv["num_entries"] = num_entries;
  rv["num_deletions"] = num_deletions;
  rv["num_merge_operands"] = num_merge_operands;
  rv["num_range_deletions"] = num_range_deletions;
  rv["slow_compression_estimated_data_size"] =
      slow_compression_estimated_data_size;
  rv["fast_compression_estimated_data_size"] =
      fast_compression_estimated_data_size;
  return rv;
}
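
// Keys under which the built-in properties are stored in the properties
// meta block.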
const std::string TablePropertiesNames::kDbId = "rocksdb.creating.db.identity";
const std::string TablePropertiesNames::kDbSessionId =
    "rocksdb.creating.session.identity";
const std::string TablePropertiesNames::kDbHostId =
    "rocksdb.creating.host.identity";
const std::string TablePropertiesNames::kDataSize =
    "rocksdb.data.size";
const std::string TablePropertiesNames::kIndexSize =
    "rocksdb.index.size";
const std::string TablePropertiesNames::kIndexPartitions =
    "rocksdb.index.partitions";
const std::string TablePropertiesNames::kTopLevelIndexSize =
    "rocksdb.top-level.index.size";
const std::string TablePropertiesNames::kIndexKeyIsUserKey =
    "rocksdb.index.key.is.user.key";
const std::string TablePropertiesNames::kIndexValueIsDeltaEncoded =
    "rocksdb.index.value.is.delta.encoded";
const std::string TablePropertiesNames::kFilterSize =
    "rocksdb.filter.size";
const std::string TablePropertiesNames::kRawKeySize =
    "rocksdb.raw.key.size";
const std::string TablePropertiesNames::kRawValueSize =
    "rocksdb.raw.value.size";
const std::string TablePropertiesNames::kNumDataBlocks =
    "rocksdb.num.data.blocks";
const std::string TablePropertiesNames::kNumEntries =
    "rocksdb.num.entries";
const std::string TablePropertiesNames::kDeletedKeys = "rocksdb.deleted.keys";
const std::string TablePropertiesNames::kMergeOperands =
    "rocksdb.merge.operands";
const std::string TablePropertiesNames::kNumRangeDeletions =
    "rocksdb.num.range-deletions";
const std::string TablePropertiesNames::kFilterPolicy =
    "rocksdb.filter.policy";
const std::string TablePropertiesNames::kFormatVersion =
    "rocksdb.format.version";
const std::string TablePropertiesNames::kFixedKeyLen =
    "rocksdb.fixed.key.length";
const std::string TablePropertiesNames::kColumnFamilyId =
    "rocksdb.column.family.id";
const std::string TablePropertiesNames::kColumnFamilyName =
    "rocksdb.column.family.name";
const std::string TablePropertiesNames::kComparator = "rocksdb.comparator";
const std::string TablePropertiesNames::kMergeOperator =
    "rocksdb.merge.operator";
const std::string TablePropertiesNames::kPrefixExtractorName =
    "rocksdb.prefix.extractor.name";
const std::string TablePropertiesNames::kPropertyCollectors =
    "rocksdb.property.collectors";
const std::string TablePropertiesNames::kCompression = "rocksdb.compression";
const std::string TablePropertiesNames::kCompressionOptions =
    "rocksdb.compression_options";
const std::string TablePropertiesNames::kCreationTime = "rocksdb.creation.time";
const std::string TablePropertiesNames::kOldestKeyTime =
    "rocksdb.oldest.key.time";
const std::string TablePropertiesNames::kFileCreationTime =
    "rocksdb.file.creation.time";
const std::string TablePropertiesNames::kSlowCompressionEstimatedDataSize =
    "rocksdb.sample_for_compression.slow.data.size";
const std::string TablePropertiesNames::kFastCompressionEstimatedDataSize =
    "rocksdb.sample_for_compression.fast.data.size";
extern const std::string kPropertiesBlock = "rocksdb.properties";
// Old property block name for backward compatibility
extern const std::string kPropertiesBlockOldName = "rocksdb.stats";
extern const std::string kCompressionDictBlock = "rocksdb.compression_dict";
extern const std::string kRangeDelBlock = "rocksdb.range_del";

// Seek to the properties block.
// Sets *is_found to true if it successfully seeks to the properties block.
Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found) {
  Status status = SeekToMetaBlock(meta_iter, kPropertiesBlock, is_found);
  if (!*is_found && status.ok()) {
    status = SeekToMetaBlock(meta_iter, kPropertiesBlockOldName, is_found);
  }
  return status;
}

// Seek to the compression dictionary block.
// Sets *is_found to true if it successfully seeks to that block.
Status SeekToCompressionDictBlock(InternalIterator* meta_iter, bool* is_found,
                                  BlockHandle* block_handle) {
  return SeekToMetaBlock(meta_iter, kCompressionDictBlock, is_found,
                         block_handle);
}
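
// Seek to the range tombstone meta block.
// Sets *is_found to true if it successfully seeks to that block.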
Status SeekToRangeDelBlock(InternalIterator* meta_iter, bool* is_found,
                           BlockHandle* block_handle = nullptr) {
  return SeekToMetaBlock(meta_iter, kRangeDelBlock, is_found, block_handle);
}

}  // namespace ROCKSDB_NAMESPACE