Fix crash in Transaction::MultiGet() when num_keys > 32
Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/6192 Test Plan: Add a unit test that fails without the fix and passes now make check Differential Revision: D19124781 Pulled By: anand1976 fbshipit-source-id: 8c8cb6fa16c3fc23ec011e168561a13f76bbd783
This commit is contained in:
parent
d6e199016c
commit
2ba7f1e574
@ -1,4 +1,8 @@
|
||||
# Rocksdb Change Log
|
||||
## 6.6.1
|
||||
### Bug Fixes
|
||||
* Fix a bug in WriteBatchWithIndex::MultiGetFromBatchAndDB, which is called by Transaction::MultiGet, that causes a crash due to stale pointer access when the number of keys is > 32
|
||||
|
||||
## 6.6.0 (11/25/2019)
|
||||
### Bug Fixes
|
||||
* Fix data corruption caused by output of intra-L0 compaction on ingested file not being placed in correct order in L0.
|
||||
|
@ -2823,6 +2823,104 @@ TEST_P(TransactionTest, MultiGetBatchedTest) {
|
||||
}
|
||||
}
|
||||
|
||||
// This test calls WriteBatchWithIndex::MultiGetFromBatchAndDB with a large
|
||||
// number of keys, i.e greater than MultiGetContext::MAX_BATCH_SIZE, which is
|
||||
// is 32. This forces autovector allocations in the MultiGet code paths
|
||||
// to use std::vector in addition to stack allocations. The MultiGet keys
|
||||
// includes Merges, which are handled specially in MultiGetFromBatchAndDB by
|
||||
// allocating an autovector of MergeContexts
|
||||
TEST_P(TransactionTest, MultiGetLargeBatchedTest) {
|
||||
WriteOptions write_options;
|
||||
ReadOptions read_options, snapshot_read_options;
|
||||
string value;
|
||||
Status s;
|
||||
|
||||
ColumnFamilyHandle* cf;
|
||||
ColumnFamilyOptions cf_options;
|
||||
|
||||
std::vector<std::string> key_str;
|
||||
for (int i = 0; i < 100; ++i) {
|
||||
key_str.emplace_back(std::to_string(i));
|
||||
}
|
||||
// Create a new column families
|
||||
s = db->CreateColumnFamily(cf_options, "CF", &cf);
|
||||
ASSERT_OK(s);
|
||||
|
||||
delete cf;
|
||||
delete db;
|
||||
db = nullptr;
|
||||
|
||||
// open DB with three column families
|
||||
std::vector<ColumnFamilyDescriptor> column_families;
|
||||
// have to open default column family
|
||||
column_families.push_back(
|
||||
ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
|
||||
// open the new column families
|
||||
cf_options.merge_operator = MergeOperators::CreateStringAppendOperator();
|
||||
column_families.push_back(ColumnFamilyDescriptor("CF", cf_options));
|
||||
|
||||
std::vector<ColumnFamilyHandle*> handles;
|
||||
|
||||
options.merge_operator = MergeOperators::CreateStringAppendOperator();
|
||||
ASSERT_OK(ReOpenNoDelete(column_families, &handles));
|
||||
assert(db != nullptr);
|
||||
|
||||
// Write some data to the db
|
||||
WriteBatch batch;
|
||||
for (int i = 0; i < 3 * MultiGetContext::MAX_BATCH_SIZE; ++i) {
|
||||
std::string val = "val" + std::to_string(i);
|
||||
batch.Put(handles[1], key_str[i], val);
|
||||
}
|
||||
s = db->Write(write_options, &batch);
|
||||
ASSERT_OK(s);
|
||||
|
||||
WriteBatchWithIndex wb;
|
||||
// Write some data to the db
|
||||
s = wb.Delete(handles[1], std::to_string(1));
|
||||
ASSERT_OK(s);
|
||||
s = wb.Put(handles[1], std::to_string(2), "new_val" + std::to_string(2));
|
||||
ASSERT_OK(s);
|
||||
// Write a lot of merges so when we call MultiGetFromBatchAndDB later on,
|
||||
// it is forced to use std::vector in rocksdb::autovector to allocate
|
||||
// MergeContexts. The number of merges needs to be >
|
||||
// MultiGetContext::MAX_BATCH_SIZE
|
||||
for (int i = 8; i < MultiGetContext::MAX_BATCH_SIZE + 24; ++i) {
|
||||
s = wb.Merge(handles[1], std::to_string(i), "merge");
|
||||
ASSERT_OK(s);
|
||||
}
|
||||
|
||||
// MultiGet a lot of keys in order to force std::vector reallocations
|
||||
std::vector<Slice> keys;
|
||||
for (int i = 0; i < MultiGetContext::MAX_BATCH_SIZE + 32; ++i) {
|
||||
keys.emplace_back(key_str[i]);
|
||||
}
|
||||
std::vector<PinnableSlice> values(keys.size());
|
||||
std::vector<Status> statuses(keys.size());
|
||||
|
||||
wb.MultiGetFromBatchAndDB(db, snapshot_read_options, handles[1], keys.size(), keys.data(),
|
||||
values.data(), statuses.data(), false);
|
||||
for (size_t i =0; i < keys.size(); ++i) {
|
||||
if (i == 1) {
|
||||
ASSERT_TRUE(statuses[1].IsNotFound());
|
||||
} else if (i == 2) {
|
||||
ASSERT_TRUE(statuses[2].ok());
|
||||
ASSERT_EQ(values[2], "new_val" + std::to_string(2));
|
||||
} else if (i >= 8 && i < 56) {
|
||||
ASSERT_TRUE(statuses[i].ok());
|
||||
ASSERT_EQ(values[i], "val" + std::to_string(i) + ",merge");
|
||||
} else {
|
||||
ASSERT_TRUE(statuses[i].ok());
|
||||
if (values[i] != "val" + std::to_string(i)) {
|
||||
ASSERT_EQ(values[i], "val" + std::to_string(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto handle : handles) {
|
||||
delete handle;
|
||||
}
|
||||
}
|
||||
|
||||
TEST_P(TransactionTest, ColumnFamiliesTest2) {
|
||||
WriteOptions write_options;
|
||||
ReadOptions read_options, snapshot_read_options;
|
||||
|
@ -1004,10 +1004,13 @@ void WriteBatchWithIndex::MultiGetFromBatchAndDB(
|
||||
assert(result == WriteBatchWithIndexInternal::Result::kMergeInProgress ||
|
||||
result == WriteBatchWithIndexInternal::Result::kNotFound);
|
||||
key_context.emplace_back(column_family, keys[i], &values[i], &statuses[i]);
|
||||
sorted_keys.emplace_back(&key_context.back());
|
||||
merges.emplace_back(result, std::move(merge_context));
|
||||
}
|
||||
|
||||
for (KeyContext& key : key_context) {
|
||||
sorted_keys.emplace_back(&key);
|
||||
}
|
||||
|
||||
// Did not find key in batch OR could not resolve Merges. Try DB.
|
||||
static_cast_with_check<DBImpl, DB>(db->GetRootDB())
|
||||
->PrepareMultiGetKeys(key_context.size(), sorted_input, &sorted_keys);
|
||||
|
Loading…
Reference in New Issue
Block a user