// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include <string>
#include <memory>
#include <functional>
#include <deque>
#include <vector>

#include "db/dbformat.h"
#include "db/skiplist.h"
#include "db/version_edit.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/immutable_options.h"
#include "db/memtable_allocator.h"
#include "util/arena.h"
#include "util/dynamic_bloom.h"
#include "util/mutable_cf_options.h"

namespace rocksdb {

class Mutex;
class MemTableIterator;
class MergeContext;
class WriteBuffer;

struct MemTableOptions {
  explicit MemTableOptions(const ImmutableCFOptions& ioptions,
                           const MutableCFOptions& mutable_cf_options);
  size_t write_buffer_size;
  size_t arena_block_size;
  uint32_t memtable_prefix_bloom_bits;
  uint32_t memtable_prefix_bloom_probes;
  size_t memtable_prefix_bloom_huge_page_tlb_size;
  bool inplace_update_support;
  size_t inplace_update_num_locks;
  UpdateStatus (*inplace_callback)(char* existing_value,
                                   uint32_t* existing_value_size,
                                   Slice delta_value,
                                   std::string* merged_value);
  size_t max_successive_merges;
  bool filter_deletes;
  Statistics* statistics;
  MergeOperator* merge_operator;
  Logger* info_log;
};

class MemTable {
 public:
  struct KeyComparator : public MemTableRep::KeyComparator {
    const InternalKeyComparator comparator;
    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
    virtual int operator()(const char* prefix_len_key1,
                           const char* prefix_len_key2) const override;
    virtual int operator()(const char* prefix_len_key,
                           const Slice& key) const override;
  };

  // MemTables are reference counted. The initial reference count
  // is zero and the caller must call Ref() at least once.
  explicit MemTable(const InternalKeyComparator& comparator,
                    const ImmutableCFOptions& ioptions,
                    const MutableCFOptions& mutable_cf_options,
                    WriteBuffer* write_buffer);

  ~MemTable();

  // Increase reference count.
  void Ref() { ++refs_; }

  // Drop reference count.
  // If the refcount goes to zero, return this memtable; otherwise, return
  // nullptr.
  MemTable* Unref() {
    --refs_;
    assert(refs_ >= 0);
    if (refs_ <= 0) {
      return this;
    }
    return nullptr;
  }
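
  // A minimal caller-side sketch of the reference-counting protocol
  // described above. Illustrative only; `cmp`, `ioptions`,
  // `mutable_cf_options`, and `wb` are assumed to be set up elsewhere.
  //
  //   MemTable* mem =
  //       new MemTable(cmp, ioptions, mutable_cf_options, wb);
  //   mem->Ref();                          // refcount: 0 -> 1
  //   // ... read from / write to mem ...
  //   MemTable* to_delete = mem->Unref();  // refcount: 1 -> 0
  //   if (to_delete != nullptr) {
  //     delete to_delete;                  // last reference dropped
  //   }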

  // Returns an estimate of the number of bytes of data in use by this
  // data structure.
  //
  // REQUIRES: external synchronization to prevent simultaneous
  // operations on the same MemTable.
  size_t ApproximateMemoryUsage();

  // This method heuristically determines if the memtable should continue to
  // host more data.
  bool ShouldScheduleFlush() const {
    return !flush_scheduled_ && should_flush_;
  }

  void MarkFlushScheduled() { flush_scheduled_ = true; }
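
  // Hedged sketch of how a write path might use the two methods above so
  // that a full memtable is handed off for flushing exactly once;
  // ScheduleFlush() is a hypothetical stand-in for the real scheduling hook.
  //
  //   if (mem->ShouldScheduleFlush()) {
  //     mem->MarkFlushScheduled();  // suppress duplicate scheduling
  //     ScheduleFlush(mem);
  //   }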

  // Return an iterator that yields the contents of the memtable.
  //
  // The caller must ensure that the underlying MemTable remains live
  // while the returned iterator is live. The keys returned by this
  // iterator are internal keys encoded by AppendInternalKey in the
  // db/dbformat.{h,cc} module.
  //
  // By default, it returns an iterator for prefix seek if prefix_extractor
  // is configured in Options.
  // arena: If not null, the arena must be used to allocate the Iterator;
  //        calling ~Iterator on the returned iterator destroys all its
  //        state except the memory allocated in the arena.
  Iterator* NewIterator(const ReadOptions& read_options, Arena* arena);
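
  // Illustrative sketch of an arena-allocated iterator: the arena owns the
  // iterator's memory, so the caller invokes the destructor explicitly and
  // never calls delete. ProcessInternalKey() is a hypothetical consumer.
  //
  //   Arena arena;
  //   ReadOptions ro;
  //   Iterator* iter = mem->NewIterator(ro, &arena);
  //   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
  //     ProcessInternalKey(iter->key(), iter->value());
  //   }
  //   iter->~Iterator();  // destroy state; memory stays with the arena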

  // Add an entry into the memtable that maps key to value at the
  // specified sequence number and with the specified type.
  // Typically value will be empty if type == kTypeDeletion.
  void Add(SequenceNumber seq, ValueType type, const Slice& key,
           const Slice& value);
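
  // Illustrative sketch of adding a put followed by a deletion for the same
  // user key; the sequence numbers are made up, and real callers obtain
  // them from the write path.
  //
  //   mem->Add(/*seq=*/100, kTypeValue, Slice("key1"), Slice("value1"));
  //   mem->Add(/*seq=*/101, kTypeDeletion, Slice("key1"), Slice());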

  // If the memtable contains a value for key, store it in *value and return
  // true.
  // If the memtable contains a deletion for key, store a NotFound() error
  // in *s and return true.
  // If the memtable contains a Merge operation as the most recent entry for
  // the key, and the merge process does not stop (no Put/Delete is reached),
  // prepend the current merge operand to the operand list in *merge_context,
  // store MergeInProgress in *s, and return false.
  // Otherwise, return false.
  bool Get(const LookupKey& key, std::string* value, Status* s,
           MergeContext* merge_context);
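
  // Illustrative lookup sketch; LookupKey and MergeContext come from the
  // headers included above, and the snapshot sequence number is made up.
  //
  //   LookupKey lkey(Slice("key1"), /*sequence=*/101);
  //   std::string value;
  //   Status s;
  //   MergeContext merge_context;
  //   if (mem->Get(lkey, &value, &s, &merge_context)) {
  //     // Found: either a live value in `value`, or `s.IsNotFound()` for a
  //     // deletion.
  //   } else {
  //     // Not found here, or a merge is in progress; consult older tables.
  //   }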

  // Attempts to update the value for key in place; otherwise falls back to
  // a normal Add.
  // Pseudocode:
  //   if key exists in current memtable && prev_value is of type kTypeValue
  //     if sizeof(new_value) <= sizeof(prev_value)
  //       update inplace
  //     else add(key, new_value)
  //   else add(key, new_value)
  void Update(SequenceNumber seq, const Slice& key, const Slice& value);

  // If prev_value for key exists, attempts to update it in place;
  // otherwise returns false.
  // Pseudocode:
  //   if key exists in current memtable && prev_value is of type kTypeValue
  //     new_value = delta(prev_value)
  //     if sizeof(new_value) <= sizeof(prev_value)
  //       update inplace
  //     else add(key, new_value)
  //   else return false
  bool UpdateCallback(SequenceNumber seq, const Slice& key,
                      const Slice& delta);
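
  // Hedged sketch of a delta-applying callback with the shape consumed by
  // UpdateCallback through MemTableOptions::inplace_callback. The counter
  // encoding (DecodeCounter/EncodeCounter) is hypothetical, and the exact
  // UpdateStatus enumerator name is assumed from rocksdb/db.h.
  //
  //   UpdateStatus AddToCounter(char* existing_value,
  //                             uint32_t* existing_value_size,
  //                             Slice delta_value,
  //                             std::string* merged_value) {
  //     uint64_t v = DecodeCounter(existing_value, *existing_value_size);
  //     v += DecodeCounter(delta_value.data(), delta_value.size());
  //     // Same encoded width as before, so the update fits in place.
  //     EncodeCounter(v, existing_value, existing_value_size);
  //     return UPDATED_INPLACE;
  //   }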

  // Returns the number of successive merge entries starting from the newest
  // entry for the key up to the last non-merge entry or last entry for the
  // key in the memtable.
  size_t CountSuccessiveMergeEntries(const LookupKey& key);

  // Get total number of entries in the memtable.
  uint64_t num_entries() const { return num_entries_; }

  // Get total number of delete entries in the memtable.
  uint64_t num_deletes() const { return num_deletes_; }
2012-10-19 23:00:53 +02:00
|
|
|
// Returns the edits area that is needed for flushing the memtable
|
|
|
|
VersionEdit* GetEdits() { return &edit_; }

  // Returns true if no entry has been inserted into the memtable.
  bool IsEmpty() const { return first_seqno_ == 0; }

  // Returns the sequence number of the first element that was inserted
  // into the memtable.
  SequenceNumber GetFirstSequenceNumber() { return first_seqno_; }

  // Returns the next active logfile number when this memtable is about to
  // be flushed to storage.
  uint64_t GetNextLogNumber() { return mem_next_logfile_number_; }

  // Sets the next active logfile number when this memtable is about to
  // be flushed to storage.
  void SetNextLogNumber(uint64_t num) { mem_next_logfile_number_ = num; }

  // Notify the underlying storage that no more items will be added.
  void MarkImmutable() {
    table_->MarkReadOnly();
    allocator_.DoneAllocating();
  }

  // Return true if the current MemTableRep supports the merge operator.
  bool IsMergeOperatorSupported() const {
    return table_->IsMergeOperatorSupported();
  }

  // Return true if the current MemTableRep supports snapshots.
  // In-place update prevents snapshots.
  bool IsSnapshotSupported() const {
    return table_->IsSnapshotSupported() && !moptions_.inplace_update_support;
  }
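
  // Caller-side capability checks, assuming only the two predicates above:
  // some representations (e.g. the cuckoo-hash rep) support neither merge
  // nor snapshots, so callers probe before relying on either. The policy
  // comments are illustrative.
  //
  //   if (!mem->IsMergeOperatorSupported()) {
  //     // reject Merge() writes against this memtable representation
  //   }
  //   if (!mem->IsSnapshotSupported()) {
  //     // DB::GetSnapshot() will return nullptr in this configuration
  //   }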

  // Get the lock associated with the key.
  port::RWMutex* GetLock(const Slice& key);

  const InternalKeyComparator& GetInternalKeyComparator() const {
    return comparator_.comparator;
  }

  const MemTableOptions* GetMemTableOptions() const { return &moptions_; }

 private:
  // Dynamically check if we can add more incoming entries.
  bool ShouldFlushNow() const;

  friend class MemTableIterator;
  friend class MemTableBackwardIterator;
  friend class MemTableList;

  KeyComparator comparator_;
  const MemTableOptions moptions_;
  int refs_;
  const size_t kArenaBlockSize;
  Arena arena_;
  MemTableAllocator allocator_;
  unique_ptr<MemTableRep> table_;

  uint64_t num_entries_;
  uint64_t num_deletes_;

  // These are used to manage memtable flushes to storage
  bool flush_in_progress_;  // started the flush
  bool flush_completed_;    // finished the flush
  uint64_t file_number_;    // filled up after flush is complete

  // The updates to be applied to the transaction log when this
  // memtable is flushed to storage.
  VersionEdit edit_;

  // The sequence number of the kv that was inserted first.
  SequenceNumber first_seqno_;

  // The log files earlier than this number can be deleted.
  uint64_t mem_next_logfile_number_;

  // rw locks for inplace updates
  std::vector<port::RWMutex> locks_;

  // No copying allowed
  MemTable(const MemTable&);
  void operator=(const MemTable&);

  const SliceTransform* const prefix_extractor_;
  std::unique_ptr<DynamicBloom> prefix_bloom_;

  // a flag indicating if a memtable has met the criteria to flush
  bool should_flush_;

  // a flag indicating if flush has been scheduled
  bool flush_scheduled_;

  Env* env_;
};

extern const char* EncodeKey(std::string* scratch, const Slice& target);

}  // namespace rocksdb