// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE
#include "table/plain/plain_table_builder.h"

#include <assert.h>

#include <string>
#include <limits>
#include <map>

#include "db/dbformat.h"
#include "file/writable_file_writer.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "table/block_based/block_builder.h"
#include "table/format.h"
#include "table/meta_blocks.h"
#include "table/plain/plain_table_bloom.h"
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_index.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/stop_watch.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// A utility that helps write block content to the file.
// @offset will advance if @block_contents was successfully written.
// @block_handle is the block handle of this particular block.
IOStatus WriteBlock(const Slice& block_contents, WritableFileWriter* file,
                    uint64_t* offset, BlockHandle* block_handle) {
  block_handle->set_offset(*offset);
  block_handle->set_size(block_contents.size());
  IOStatus io_s = file->Append(block_contents);

  if (io_s.ok()) {
    *offset += block_contents.size();
  }

  return io_s;
}

}  // namespace

// kPlainTableMagicNumber was picked by running
//    echo rocksdb.table.plain | sha1sum
// and taking the leading 64 bits.
extern const uint64_t kPlainTableMagicNumber = 0x8242229663bf9564ull;
extern const uint64_t kLegacyPlainTableMagicNumber = 0x4f3418eb7a8f13b8ull;

PlainTableBuilder::PlainTableBuilder(
    const ImmutableCFOptions& ioptions, const MutableCFOptions& moptions,
    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
        int_tbl_prop_collector_factories,
    uint32_t column_family_id, WritableFileWriter* file, uint32_t user_key_len,
    EncodingType encoding_type, size_t index_sparseness,
    uint32_t bloom_bits_per_key, const std::string& column_family_name,
    uint32_t num_probes, size_t huge_page_tlb_size, double hash_table_ratio,
    bool store_index_in_file)
    : ioptions_(ioptions),
      moptions_(moptions),
      bloom_block_(num_probes),
      file_(file),
      bloom_bits_per_key_(bloom_bits_per_key),
      huge_page_tlb_size_(huge_page_tlb_size),
      encoder_(encoding_type, user_key_len, moptions.prefix_extractor.get(),
               index_sparseness),
      store_index_in_file_(store_index_in_file),
      prefix_extractor_(moptions.prefix_extractor.get()) {
  // Build index block and save it in the file if hash_table_ratio > 0
  if (store_index_in_file_) {
    assert(hash_table_ratio > 0 || IsTotalOrderMode());
    index_builder_.reset(new PlainTableIndexBuilder(
        &arena_, ioptions, moptions.prefix_extractor.get(), index_sparseness,
        hash_table_ratio, huge_page_tlb_size_));
    properties_.user_collected_properties
        [PlainTablePropertyNames::kBloomVersion] = "1";  // For future use
  }

  properties_.fixed_key_len = user_key_len;

  // For plain table, we put all the data in a big chunk.
  properties_.num_data_blocks = 1;
  // Fill it later if store_index_in_file_ == true
  properties_.index_size = 0;
  properties_.filter_size = 0;
  // To support roll-back to previous version, now still use version 0 for
  // plain encoding.
  properties_.format_version = (encoding_type == kPlain) ? 0 : 1;
  properties_.column_family_id = column_family_id;
  properties_.column_family_name = column_family_name;
  properties_.prefix_extractor_name = moptions_.prefix_extractor != nullptr
                                          ? moptions_.prefix_extractor->Name()
                                          : "nullptr";

  std::string val;
  PutFixed32(&val, static_cast<uint32_t>(encoder_.GetEncodingType()));
  properties_.user_collected_properties
      [PlainTablePropertyNames::kEncodingType] = val;

  for (auto& collector_factories : *int_tbl_prop_collector_factories) {
    table_properties_collectors_.emplace_back(
        collector_factories->CreateIntTblPropCollector(column_family_id));
  }
}

PlainTableBuilder::~PlainTableBuilder() {
}
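
// Appends one entry to the single data chunk of the plain table. Each entry
// is written as the encoded key, a varint32 value length, and then the value
// itself (see the appends below). Range deletion entries are not supported.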
void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
  // temp buffer for metadata bytes between key and value.
  char meta_bytes_buf[6];
  size_t meta_bytes_buf_size = 0;

  ParsedInternalKey internal_key;
  if (!ParseInternalKey(key, &internal_key)) {
    assert(false);
    return;
  }
  if (internal_key.type == kTypeRangeDeletion) {
    status_ = Status::NotSupported("Range deletion unsupported");
    return;
  }

  // Store key hash
  if (store_index_in_file_) {
    if (moptions_.prefix_extractor == nullptr) {
      keys_or_prefixes_hashes_.push_back(GetSliceHash(internal_key.user_key));
    } else {
      Slice prefix =
          moptions_.prefix_extractor->Transform(internal_key.user_key);
      keys_or_prefixes_hashes_.push_back(GetSliceHash(prefix));
    }
  }
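
  // NOTE: the hashes buffered above are only consumed later, in Finish(),
  // where the bloom filter block is populated via AddKeysHashes().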

  // Remember the offset where this entry starts.
  assert(offset_ <= std::numeric_limits<uint32_t>::max());
  auto prev_offset = static_cast<uint32_t>(offset_);
  // Write out the key
  io_status_ = encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf,
                                  &meta_bytes_buf_size);
  if (SaveIndexInFile()) {
    index_builder_->AddKeyPrefix(GetPrefix(internal_key), prev_offset);
  }

  // Write value length
  uint32_t value_size = static_cast<uint32_t>(value.size());
  if (io_status_.ok()) {
    char* end_ptr =
        EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size);
    assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf));
    meta_bytes_buf_size = end_ptr - meta_bytes_buf;
    io_status_ = file_->Append(Slice(meta_bytes_buf, meta_bytes_buf_size));
  }

  // Write value
  if (io_status_.ok()) {
    io_status_ = file_->Append(value);
    offset_ += value_size + meta_bytes_buf_size;
  }

  if (io_status_.ok()) {
    properties_.num_entries++;
    properties_.raw_key_size += key.size();
    properties_.raw_value_size += value.size();
    if (internal_key.type == kTypeDeletion ||
        internal_key.type == kTypeSingleDeletion) {
      properties_.num_deletions++;
    } else if (internal_key.type == kTypeMerge) {
      properties_.num_merge_operands++;
    }
  }

  // notify property collectors
  NotifyCollectTableCollectorsOnAdd(
      key, value, offset_, table_properties_collectors_, ioptions_.info_log);
  status_ = io_status_;
}
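
// Writes out the remaining metadata: the optional bloom and index meta
// blocks, the properties block, the metaindex block, and the footer. After
// Finish() returns, the builder is closed and no more entries can be added.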
Status PlainTableBuilder::Finish() {
  assert(!closed_);
  closed_ = true;

  properties_.data_size = offset_;

  // Write the following blocks
  //  1. [meta block: bloom] - optional
  //  2. [meta block: index] - optional
  //  3. [meta block: properties]
  //  4. [metaindex block]
  //  5. [footer]

  MetaIndexBuilder meta_index_builer;

  if (store_index_in_file_ && (properties_.num_entries > 0)) {
    assert(properties_.num_entries <= std::numeric_limits<uint32_t>::max());
    Status s;
    BlockHandle bloom_block_handle;
    if (bloom_bits_per_key_ > 0) {
      bloom_block_.SetTotalBits(
          &arena_,
          static_cast<uint32_t>(properties_.num_entries) * bloom_bits_per_key_,
          ioptions_.bloom_locality, huge_page_tlb_size_, ioptions_.info_log);
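
      // Record the number of underlying bloom blocks as a user-collected
      // table property (kNumBloomBlocks).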
      PutVarint32(&properties_.user_collected_properties
                       [PlainTablePropertyNames::kNumBloomBlocks],
                  bloom_block_.GetNumBlocks());

      bloom_block_.AddKeysHashes(keys_or_prefixes_hashes_);

      Slice bloom_finish_result = bloom_block_.Finish();

      properties_.filter_size = bloom_finish_result.size();
      io_status_ =
          WriteBlock(bloom_finish_result, file_, &offset_, &bloom_block_handle);

      if (!io_status_.ok()) {
        status_ = io_status_;
        return status_;
      }
      meta_index_builer.Add(BloomBlockBuilder::kBloomBlock, bloom_block_handle);
    }
    BlockHandle index_block_handle;
    Slice index_finish_result = index_builder_->Finish();

    properties_.index_size = index_finish_result.size();
    io_status_ =
        WriteBlock(index_finish_result, file_, &offset_, &index_block_handle);

    if (!io_status_.ok()) {
      status_ = io_status_;
      return status_;
    }

    meta_index_builer.Add(PlainTableIndexBuilder::kPlainTableIndexBlock,
                          index_block_handle);
  }

  // Bloom block size and index block size have been recorded above; now
  // assemble the property block.
  PropertyBlockBuilder property_block_builder;
  // -- Add basic properties
  property_block_builder.AddTableProperty(properties_);

  property_block_builder.Add(properties_.user_collected_properties);

  // -- Add user collected properties
  NotifyCollectTableCollectorsOnFinish(table_properties_collectors_,
                                       ioptions_.info_log,
                                       &property_block_builder);
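
  // Basic properties, encoder metadata, and anything emitted by the
  // user-defined collectors all end up in the same properties block.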

  // -- Write property block
  BlockHandle property_block_handle;
  IOStatus s = WriteBlock(property_block_builder.Finish(), file_, &offset_,
                          &property_block_handle);
  if (!s.ok()) {
    return std::move(s);
  }
  meta_index_builer.Add(kPropertiesBlock, property_block_handle);

  // -- write metaindex block
  BlockHandle metaindex_block_handle;
  io_status_ = WriteBlock(meta_index_builer.Finish(), file_, &offset_,
                          &metaindex_block_handle);
  if (!io_status_.ok()) {
    status_ = io_status_;
    return status_;
  }

  // Write Footer
  // no need to write out new footer if we're using default checksum
  Footer footer(kLegacyPlainTableMagicNumber, 0);
  footer.set_metaindex_handle(metaindex_block_handle);
  footer.set_index_handle(BlockHandle::NullBlockHandle());
  std::string footer_encoding;
  footer.EncodeTo(&footer_encoding);
  io_status_ = file_->Append(footer_encoding);
  if (io_status_.ok()) {
    offset_ += footer_encoding.size();
  }
  status_ = io_status_;
  return status_;
}

void PlainTableBuilder::Abandon() {
  closed_ = true;
}

uint64_t PlainTableBuilder::NumEntries() const {
  return properties_.num_entries;
}

uint64_t PlainTableBuilder::FileSize() const {
  return offset_;
}

std::string PlainTableBuilder::GetFileChecksum() const {
  if (file_ != nullptr) {
    return file_->GetFileChecksum();
  } else {
    return kUnknownFileChecksum;
  }
}

const char* PlainTableBuilder::GetFileChecksumFuncName() const {
  if (file_ != nullptr) {
    return file_->GetFileChecksumFuncName();
  } else {
    return kUnknownFileChecksumFuncName.c_str();
  }
}

}  // namespace ROCKSDB_NAMESPACE
#endif  // ROCKSDB_LITE