Fix HashSkipList and HashLinkedList SIGSEGV
Summary:
Original Summary: Yesterday, @ljin and I were debugging various db_stress issues. We suspected that one of them happens when we concurrently call NewIterator without prefix_seek on HashSkipList. This test demonstrates it.

Update: Arena is not thread-safe! When creating a new full iterator, we *have* to create a new arena; otherwise we're doomed.

Test Plan: The test that used to SIGSEGV and trip assertions now passes!

Reviewers: ljin, haobo, sdong

Reviewed By: sdong

CC: leveldb, ljin

Differential Revision: https://reviews.facebook.net/D16857
Parent: 6c72079d77
Commit: 3c75cc15a9
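For context on the root cause: RocksDB's Arena is a bump allocator with no internal synchronization. HashSkipListRep and HashLinkListRep used to build the merged list behind a full (non-prefix) iterator out of the memtable's shared arena_, so two threads calling NewIterator() without prefix_seek at the same time would both allocate from the same arena. Prefix iterators are unaffected because they merely wrap an existing bucket and allocate nothing; only the full iterator has to copy every bucket into a new sorted list, and that copying allocates. Below is a toy sketch of the failure class; ToyArena is an illustration of an unsynchronized bump allocator, not RocksDB's actual Arena:

// Toy bump allocator (NOT RocksDB's Arena) showing the race: two threads
// can read the same alloc_ptr_ before either advances it, so they hand
// out overlapping memory; the vector push is an unsynchronized write too.
#include <cstddef>
#include <vector>

class ToyArena {
 public:
  explicit ToyArena(size_t block_size) : block_size_(block_size) {}
  ~ToyArena() {
    for (char* b : blocks_) delete[] b;
  }

  char* Allocate(size_t bytes) {
    if (bytes > remaining_) {                    // racy read of remaining_
      blocks_.push_back(new char[block_size_]);  // racy container mutation
      alloc_ptr_ = blocks_.back();
      remaining_ = block_size_;
    }
    char* result = alloc_ptr_;  // two threads may both observe this value
    alloc_ptr_ += bytes;        // lost update under contention
    remaining_ -= bytes;
    return result;
  }

 private:
  const size_t block_size_;
  char* alloc_ptr_ = nullptr;
  size_t remaining_ = 0;
  std::vector<char*> blocks_;  // shared, unsynchronized state
};

The fix below removes the sharing entirely: each full iterator gets a freshly constructed Arena and owns it for its lifetime.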
@@ -369,6 +369,43 @@ TEST(PrefixTest, TestResult) {
   }
 }
 
+TEST(PrefixTest, FullIterator) {
+  while (NextOptions(1000000)) {
+    DestroyDB(kDbName, Options());
+    auto db = OpenDb();
+    WriteOptions write_options;
+
+    std::vector<uint64_t> prefixes;
+    for (uint64_t i = 0; i < 100; ++i) {
+      prefixes.push_back(i);
+    }
+    std::random_shuffle(prefixes.begin(), prefixes.end());
+
+    for (auto prefix : prefixes) {
+      for (uint64_t i = 0; i < 200; ++i) {
+        TestKey test_key(prefix, i);
+        Slice key = TestKeyToSlice(test_key);
+        ASSERT_OK(db->Put(write_options, key, Slice("0")));
+      }
+    }
+
+    auto func = [](void* db_void) {
+      auto db = reinterpret_cast<DB*>(db_void);
+      std::unique_ptr<Iterator> iter(db->NewIterator(ReadOptions()));
+      iter->SeekToFirst();
+      for (int i = 0; i < 3; ++i) {
+        iter->Next();
+      }
+    };
+
+    auto env = Env::Default();
+    for (int i = 0; i < 16; ++i) {
+      env->StartThread(func, reinterpret_cast<void*>(db.get()));
+    }
+    env->WaitForJoin();
+  }
+}
+
 TEST(PrefixTest, DynamicPrefixIterator) {
   while (NextOptions(FLAGS_bucket_count)) {
     std::cout << "*** Mem table: " << options.memtable_factory->Name()
@@ -52,6 +52,8 @@ class Arena {
   // same size of that allocation.
   virtual size_t IrregularBlockNum() const { return irregular_block_num; }
 
+  size_t BlockSize() const { return kBlockSize; }
+
  private:
   // Number of bytes allocated in one block
   const size_t kBlockSize;
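The BlockSize() accessor added here exists so the memtable reps below can construct a fresh, comparably sized arena for each full iterator; Arena's block size is fixed at construction, so this is the one parameter a caller needs to clone its allocation granularity. The call shape, as it appears in the hunks below:

// Size the per-iterator arena like the memtable's own arena.
Arena* new_arena = new Arena(arena_->BlockSize());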
@@ -141,8 +141,8 @@ class HashLinkListRep : public MemTableRep {
 
   class FullListIterator : public MemTableRep::Iterator {
    public:
-    explicit FullListIterator(FullList* list)
-        : iter_(list), full_list_(list) {}
+    explicit FullListIterator(FullList* list, Arena* arena)
+        : iter_(list), full_list_(list), arena_(arena) {}
 
     virtual ~FullListIterator() {
     }
@@ -196,6 +196,7 @@ class HashLinkListRep : public MemTableRep {
     FullList::Iterator iter_;
     // To destruct with the iterator.
     std::unique_ptr<FullList> full_list_;
+    std::unique_ptr<Arena> arena_;
     std::string tmp_;       // For passing to EncodeKey
   };
 
@@ -416,7 +417,9 @@ void HashLinkListRep::Get(const LookupKey& k, void* callback_args,
 }
 
 MemTableRep::Iterator* HashLinkListRep::GetIterator() {
-  auto list = new FullList(compare_, arena_);
+  // allocate a new arena of similar size to the one currently in use
+  Arena* new_arena = new Arena(arena_->BlockSize());
+  auto list = new FullList(compare_, new_arena);
   for (size_t i = 0; i < bucket_size_; ++i) {
     auto bucket = GetBucket(i);
     if (bucket != nullptr) {
@@ -426,7 +429,7 @@ MemTableRep::Iterator* HashLinkListRep::GetIterator() {
       }
     }
   }
-  return new FullListIterator(list);
+  return new FullListIterator(list, new_arena);
 }
 
 MemTableRep::Iterator* HashLinkListRep::GetPrefixIterator(
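The ownership model these two hunks establish (and which the HashSkipListRep hunks below mirror) is worth spelling out. Sketched here with toy stand-in types, not the real FullList/Arena classes: the returned iterator uniquely owns both the merged list and the private arena its nodes were allocated from, so both are released together exactly when the iterator is destroyed, and no other thread ever touches that arena.

#include <memory>

struct ToyArena {};  // stand-in for the per-iterator Arena
struct ToyList {};   // stand-in for the merged FullList

class ToyFullIterator {
 public:
  // Mirrors FullListIterator(list, new_arena): takes ownership of both.
  ToyFullIterator(ToyList* list, ToyArena* arena)
      : list_(list), arena_(arena) {}

 private:
  std::unique_ptr<ToyList> list_;
  // Members destruct in reverse declaration order, so arena_ goes first;
  // that is fine as long as the list destructor never touches memory the
  // arena handed out (true for RocksDB's skip lists, whose nodes are
  // reclaimed only by the arena itself).
  std::unique_ptr<ToyArena> arena_;
};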
@@ -81,10 +81,9 @@ class HashSkipListRep : public MemTableRep {
 
   class Iterator : public MemTableRep::Iterator {
    public:
-    explicit Iterator(Bucket* list, bool own_list = true)
-        : list_(list),
-          iter_(list),
-          own_list_(own_list) {}
+    explicit Iterator(Bucket* list, bool own_list = true,
+                      Arena* arena = nullptr)
+        : list_(list), iter_(list), own_list_(own_list), arena_(arena) {}
 
     virtual ~Iterator() {
       // if we own the list, we should also delete it
@@ -163,6 +162,7 @@ class HashSkipListRep : public MemTableRep {
     // here we track if we own list_. If we own it, we are also
     // responsible for it's cleaning. This is a poor man's shared_ptr
     bool own_list_;
+    std::unique_ptr<Arena> arena_;
     std::string tmp_;       // For passing to EncodeKey
   };
 
@@ -289,7 +289,9 @@ void HashSkipListRep::Get(const LookupKey& k, void* callback_args,
 }
 
 MemTableRep::Iterator* HashSkipListRep::GetIterator() {
-  auto list = new Bucket(compare_, arena_);
+  // allocate a new arena of similar size to the one currently in use
+  Arena* new_arena = new Arena(arena_->BlockSize());
+  auto list = new Bucket(compare_, new_arena);
   for (size_t i = 0; i < bucket_size_; ++i) {
     auto bucket = GetBucket(i);
     if (bucket != nullptr) {
@@ -299,7 +301,7 @@ MemTableRep::Iterator* HashSkipListRep::GetIterator() {
       }
     }
   }
-  return new Iterator(list);
+  return new Iterator(list, true, new_arena);
 }
 
 MemTableRep::Iterator* HashSkipListRep::GetPrefixIterator(const Slice& prefix) {
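With each full iterator backed by its own arena, the pattern the new FullIterator test exercises is safe: many threads can independently build and walk full iterators over the same memtable. A standalone sketch of that usage with std::thread (the test itself uses Env::StartThread); the function name and thread count here are illustrative, not part of the patch:

#include <memory>
#include <thread>
#include <vector>

#include "rocksdb/db.h"

void ReadFromManyThreads(rocksdb::DB* db) {
  std::vector<std::thread> threads;
  for (int i = 0; i < 16; ++i) {
    threads.emplace_back([db] {
      // Each thread gets an independent iterator; after this fix the
      // underlying merged list and its arena are private to the iterator.
      std::unique_ptr<rocksdb::Iterator> iter(
          db->NewIterator(rocksdb::ReadOptions()));
      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
        // consume iter->key() / iter->value()
      }
    });
  }
  for (auto& t : threads) t.join();
}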