Fix memory leak on error opening PlainTable (#5951)
Summary: Several error paths in opening a plain table would leak memory. PR https://github.com/facebook/rocksdb/issues/5940 extended the leak to one more error path, which happened to be (mistakenly) exercised by CuckooTableDBTest.AdaptiveTable. That test has been fixed, and the exercising of plain table error cases (more than before) has been added as BadOptions1 and BadOptions2 to PlainTableDBTest. This effectively moved the memory leak to plain_table_db_test. Also included here is a cheap fix for the memory leak, without (yet?) changing the signature of ReadTableProperties. This fixes the ASAN failures in the unit tests.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/5951

Test Plan: make COMPILE_WITH_ASAN=1 check

Differential Revision: D18051940

Pulled By: pdillinger

fbshipit-source-id: e2952930c09a2b46c4f1ff09818c5090426929de
Commit 27a124571f (parent 7245fb5f63)
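The "cheap fix" mentioned in the summary transfers ownership of the TableProperties returned by ReadTableProperties into a std::shared_ptr right away, so every subsequent early-return error path releases it automatically. Below is a minimal, self-contained C++ sketch of that ownership pattern; ReadProps, BuildIndex, OpenSketch, and the trimmed-down Status/TableProperties types are illustrative stand-ins, not the actual RocksDB APIs (the real change is in PlainTableReader::Open, shown in the last hunks of the diff below).

#include <memory>

// Illustrative stand-ins only -- not the real RocksDB types or functions.
struct TableProperties {};

struct Status {
  static Status OK() { return Status{true}; }
  static Status Corruption() { return Status{false}; }
  bool ok() const { return ok_; }
  bool ok_;
};

// Mimics ReadTableProperties: allocates *props and hands ownership to the caller.
Status ReadProps(TableProperties** props) {
  *props = new TableProperties();
  return Status::OK();
}

Status BuildIndex(const TableProperties& /*props*/) {
  return Status::Corruption();  // pretend a later step fails
}

Status OpenSketch() {
  TableProperties* props_ptr = nullptr;
  Status s = ReadProps(&props_ptr);
  // Adopt the raw pointer immediately, before any early return, so every
  // error path below releases the properties instead of leaking them.
  std::shared_ptr<TableProperties> props(props_ptr);
  if (!s.ok()) {
    return s;  // freed by the shared_ptr (deleting nullptr is a no-op)
  }
  s = BuildIndex(*props);
  if (!s.ok()) {
    return s;  // no leak: `props` goes out of scope here
  }
  // On success, the shared_ptr can be handed to the long-lived reader object.
  return s;
}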
@@ -10,6 +10,8 @@
* Fix a bug causing a crash during ingest external file when a background compaction causes a severe error (file not found).
* Fix a bug when partitioned filters and prefix search are used in conjunction: ::SeekForPrev could return invalid for an existing prefix. ::SeekForPrev might be called by the user, or internally on ::Prev, or within ::Seek if the return value involves Delete or a Merge operand.
* Fix OnFlushCompleted fired before the flush result is persisted in MANIFEST when there is a concurrent flush job. The bug has existed since OnFlushCompleted was introduced in RocksDB 3.8.
* Fixed an sst_dump crash on some plain table SST files.
* Fixed a memory leak in some error cases of opening plain table SST files.
### New Features
* Introduced DBOptions::max_write_batch_group_size_bytes to configure a maximum limit on the number of bytes that are written in a single batch of WAL or memtable writes. It is followed when the leader write size is larger than 1/8 of this limit.
* VerifyChecksum() by default will issue readahead. Allow ReadOptions to be passed in to those functions to override the readahead size. For checksum verification before external SST file ingestion, a new option, IngestExternalFileOptions.verify_checksums_readahead_size, is added for this readahead setting.
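As a usage note for the two options introduced under New Features above, the sketch below shows how they might typically be set when opening a DB and ingesting an external SST file. The member names come straight from the changelog entries; everything else (paths, sizes, the helper function itself) is illustrative, and the ingestion-time readahead only applies when checksum verification before ingest is enabled.

#include <memory>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Illustrative helper: `db_path` and `sst_path` are placeholders.
rocksdb::Status OpenAndIngest(const std::string& db_path,
                              const std::string& sst_path) {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Cap the bytes grouped into a single WAL/memtable write batch.
  options.max_write_batch_group_size_bytes = 1 << 20;  // 1 MB

  rocksdb::DB* raw_db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, db_path, &raw_db);
  if (!s.ok()) {
    return s;
  }
  std::unique_ptr<rocksdb::DB> db(raw_db);

  // Verify checksums before ingestion, using a 2 MB readahead for the check.
  rocksdb::IngestExternalFileOptions ifo;
  ifo.verify_checksums_before_ingest = true;
  ifo.verify_checksums_readahead_size = 2 * 1024 * 1024;
  return db->IngestExternalFile({sst_path}, ifo);
}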
@@ -285,6 +285,9 @@ TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
 TEST_F(CuckooTableDBTest, AdaptiveTable) {
   Options options = CurrentOptions();
 
+  // Ensure options compatible with PlainTable
+  options.prefix_extractor.reset(NewCappedPrefixTransform(8));
+
   // Write some keys using cuckoo table.
   options.table_factory.reset(NewCuckooTableFactory());
   Reopen(&options);
@@ -393,6 +393,65 @@ class TestPlainTableFactory : public PlainTableFactory {
   const std::string column_family_name_;
 };
 
+TEST_P(PlainTableDBTest, BadOptions1) {
+  // Build with a prefix extractor
+  ASSERT_OK(Put("1000000000000foo", "v1"));
+  dbfull()->TEST_FlushMemTable();
+
+  // Bad attempt to re-open without a prefix extractor
+  Options options = CurrentOptions();
+  options.prefix_extractor.reset();
+  Reopen(&options);
+  ASSERT_EQ(
+      "Invalid argument: Prefix extractor is missing when opening a PlainTable "
+      "built using a prefix extractor",
+      Get("1000000000000foo"));
+
+  // Bad attempt to re-open with different prefix extractor
+  options.prefix_extractor.reset(NewFixedPrefixTransform(6));
+  Reopen(&options);
+  ASSERT_EQ(
+      "Invalid argument: Prefix extractor given doesn't match the one used to "
+      "build PlainTable",
+      Get("1000000000000foo"));
+
+  // Correct prefix extractor
+  options.prefix_extractor.reset(NewFixedPrefixTransform(8));
+  Reopen(&options);
+  ASSERT_EQ("v1", Get("1000000000000foo"));
+}
+
+TEST_P(PlainTableDBTest, BadOptions2) {
+  Options options = CurrentOptions();
+  options.prefix_extractor.reset();
+  options.create_if_missing = true;
+  DestroyAndReopen(&options);
+  // Build without a prefix extractor
+  // (apparently works even if hash_table_ratio > 0)
+  ASSERT_OK(Put("1000000000000foo", "v1"));
+  dbfull()->TEST_FlushMemTable();
+
+  // Bad attempt to re-open with hash_table_ratio > 0 and no prefix extractor
+  Status s = TryReopen(&options);
+  ASSERT_EQ(
+      "Not implemented: PlainTable requires a prefix extractor enable prefix "
+      "hash mode.",
+      s.ToString());
+
+  // OK to open with hash_table_ratio == 0 and no prefix extractor
+  PlainTableOptions plain_table_options;
+  plain_table_options.hash_table_ratio = 0;
+  options.table_factory.reset(NewPlainTableFactory(plain_table_options));
+  Reopen(&options);
+  ASSERT_EQ("v1", Get("1000000000000foo"));
+
+  // OK to open newly with a prefix_extractor and hash table; builds index
+  // in memory.
+  options = CurrentOptions();
+  Reopen(&options);
+  ASSERT_EQ("v1", Get("1000000000000foo"));
+}
+
 TEST_P(PlainTableDBTest, Flush) {
   for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
        huge_page_tlb_size += 2 * 1024 * 1024) {
@@ -128,10 +128,11 @@ Status PlainTableReader::Open(
     return Status::NotSupported("File is too large for PlainTableReader!");
   }
 
-  TableProperties* props = nullptr;
+  TableProperties* props_ptr = nullptr;
   auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
-                               ioptions, &props,
+                               ioptions, &props_ptr,
                                true /* compression_type_missing */);
+  std::shared_ptr<TableProperties> props(props_ptr);
   if (!s.ok()) {
     return s;
   }
@@ -165,7 +166,7 @@ Status PlainTableReader::Open(
 
   std::unique_ptr<PlainTableReader> new_reader(new PlainTableReader(
       ioptions, std::move(file), env_options, internal_comparator,
-      encoding_type, file_size, props, prefix_extractor));
+      encoding_type, file_size, props.get(), prefix_extractor));
 
   s = new_reader->MmapDataIfNeeded();
   if (!s.ok()) {
@@ -173,8 +174,9 @@ Status PlainTableReader::Open(
   }
 
   if (!full_scan_mode) {
-    s = new_reader->PopulateIndex(props, bloom_bits_per_key, hash_table_ratio,
-                                  index_sparseness, huge_page_tlb_size);
+    s = new_reader->PopulateIndex(props.get(), bloom_bits_per_key,
+                                  hash_table_ratio, index_sparseness,
+                                  huge_page_tlb_size);
     if (!s.ok()) {
       return s;
     }
@@ -184,7 +186,7 @@ Status PlainTableReader::Open(
     new_reader->full_scan_mode_ = true;
   }
   // PopulateIndex can add to the props, so don't store them until now
-  new_reader->table_properties_.reset(props);
+  new_reader->table_properties_ = props;
 
   if (immortal_table && new_reader->file_info_.is_mmap_mode) {
     new_reader->dummy_cleanable_.reset(new Cleanable());