// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/memtable_list.h"

#include <algorithm>
#include <string>
#include <vector>
#include "db/merge_context.h"
#include "db/version_set.h"
#include "db/write_controller.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "rocksdb/write_buffer_manager.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {
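
// Test fixture shared by the tests below: owns a scratch DB, its column
// family handles, and an atomic counter used to hand out fake file numbers.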
class MemTableListTest : public testing::Test {
 public:
  std::string dbname;
  DB* db;
  Options options;
  std::vector<ColumnFamilyHandle*> handles;
  std::atomic<uint64_t> file_number;

  MemTableListTest() : db(nullptr), file_number(1) {
    dbname = test::PerThreadDBPath("memtable_list_test");
    options.create_if_missing = true;
    DestroyDB(dbname, options);
  }

  // Create a test db if not yet created
  void CreateDB() {
    if (db == nullptr) {
      options.create_if_missing = true;
      DestroyDB(dbname, options);
      // Open DB only with default column family
      ColumnFamilyOptions cf_options;
      std::vector<ColumnFamilyDescriptor> cf_descs;
      cf_descs.emplace_back(kDefaultColumnFamilyName, cf_options);
      Status s = DB::Open(options, dbname, cf_descs, &handles, &db);
      EXPECT_OK(s);

      ColumnFamilyOptions cf_opt1, cf_opt2;
      cf_opt1.cf_paths.emplace_back(dbname + "_one_1",
                                    std::numeric_limits<uint64_t>::max());
      cf_opt2.cf_paths.emplace_back(dbname + "_two_1",
                                    std::numeric_limits<uint64_t>::max());
      int sz = static_cast<int>(handles.size());
      handles.resize(sz + 2);
      s = db->CreateColumnFamily(cf_opt1, "one", &handles[1]);
      EXPECT_OK(s);
      s = db->CreateColumnFamily(cf_opt2, "two", &handles[2]);
      EXPECT_OK(s);

      cf_descs.emplace_back("one", cf_options);
      cf_descs.emplace_back("two", cf_options);
    }
  }
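
  // Tear-down: release all column family handles before closing and
  // destroying the DB.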
  ~MemTableListTest() {
    if (db) {
      std::vector<ColumnFamilyDescriptor> cf_descs(handles.size());
      for (int i = 0; i != static_cast<int>(handles.size()); ++i) {
        handles[i]->GetDescriptor(&cf_descs[i]);
      }
      for (auto h : handles) {
        if (h) {
          db->DestroyColumnFamilyHandle(h);
        }
      }
      handles.clear();
      delete db;
      db = nullptr;
      DestroyDB(dbname, options, cf_descs);
    }
  }
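
  // The helpers below stand up just enough real machinery (a VersionSet,
  // WriteBufferManager, and InstrumentedMutex) to install flush results
  // without a full DBImpl.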

  // Calls MemTableList::TryInstallMemtableFlushResults() and sets up all
  // structures needed to call this function.
  Status Mock_InstallMemtableFlushResults(
      MemTableList* list, const MutableCFOptions& mutable_cf_options,
      const autovector<MemTable*>& m, autovector<MemTable*>* to_delete) {
    // Create a mock Logger
    test::NullLogger logger;
    LogBuffer log_buffer(DEBUG_LEVEL, &logger);

    CreateDB();
    // Create a mock VersionSet
    DBOptions db_options;
    ImmutableDBOptions immutable_db_options(db_options);
    EnvOptions env_options;
    std::shared_ptr<Cache> table_cache(NewLRUCache(50000, 16));
    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
    WriteController write_controller(10000000u);

    VersionSet versions(dbname, &immutable_db_options, env_options,
                        table_cache.get(), &write_buffer_manager,
                        &write_controller);
    std::vector<ColumnFamilyDescriptor> cf_descs;
    cf_descs.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions());
    cf_descs.emplace_back("one", ColumnFamilyOptions());
    cf_descs.emplace_back("two", ColumnFamilyOptions());

    EXPECT_OK(versions.Recover(cf_descs, false));

    // Create mock default ColumnFamilyData
    auto column_family_set = versions.GetColumnFamilySet();
    LogsWithPrepTracker dummy_prep_tracker;
    auto cfd = column_family_set->GetDefault();
    EXPECT_TRUE(nullptr != cfd);
    uint64_t file_num = file_number.fetch_add(1);
    // Create dummy mutex.
    InstrumentedMutex mutex;
    InstrumentedMutexLock l(&mutex);
    return list->TryInstallMemtableFlushResults(
        cfd, mutable_cf_options, m, &dummy_prep_tracker, &versions, &mutex,
        file_num, to_delete, nullptr, &log_buffer);
  }

  // Calls InstallMemtableAtomicFlushResults() and sets up all
  // structures needed to call this function.
  Status Mock_InstallMemtableAtomicFlushResults(
      autovector<MemTableList*>& lists, const autovector<uint32_t>& cf_ids,
      const autovector<const MutableCFOptions*>& mutable_cf_options_list,
      const autovector<const autovector<MemTable*>*>& mems_list,
      autovector<MemTable*>* to_delete) {
    // Create a mock Logger
    test::NullLogger logger;
    LogBuffer log_buffer(DEBUG_LEVEL, &logger);

    CreateDB();
    // Create a mock VersionSet
    DBOptions db_options;
    ImmutableDBOptions immutable_db_options(db_options);
    EnvOptions env_options;
    std::shared_ptr<Cache> table_cache(NewLRUCache(50000, 16));
    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
    WriteController write_controller(10000000u);

    VersionSet versions(dbname, &immutable_db_options, env_options,
                        table_cache.get(), &write_buffer_manager,
                        &write_controller);
    std::vector<ColumnFamilyDescriptor> cf_descs;
    cf_descs.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions());
    cf_descs.emplace_back("one", ColumnFamilyOptions());
    cf_descs.emplace_back("two", ColumnFamilyOptions());
    EXPECT_OK(versions.Recover(cf_descs, false));

    // Create mock default ColumnFamilyData
    auto column_family_set = versions.GetColumnFamilySet();

    LogsWithPrepTracker dummy_prep_tracker;
    autovector<ColumnFamilyData*> cfds;
    for (int i = 0; i != static_cast<int>(cf_ids.size()); ++i) {
      cfds.emplace_back(column_family_set->GetColumnFamily(cf_ids[i]));
      EXPECT_NE(nullptr, cfds[i]);
    }
    autovector<FileMetaData> file_metas;
    for (size_t i = 0; i != cf_ids.size(); ++i) {
      FileMetaData meta;
      uint64_t file_num = file_number.fetch_add(1);
      meta.fd = FileDescriptor(file_num, 0, 0);
      file_metas.emplace_back(meta);
    }
    InstrumentedMutex mutex;
    InstrumentedMutexLock l(&mutex);
    return InstallMemtableAtomicFlushResults(
        &lists, cfds, mutable_cf_options_list, mems_list, &versions, &mutex,
        file_metas, to_delete, nullptr, &log_buffer);
  }
};

TEST_F(MemTableListTest, Empty) {
  // Create an empty MemTableList and validate basic functions.
  MemTableList list(1, 0);

  ASSERT_EQ(0, list.NumNotFlushed());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
  ASSERT_FALSE(list.IsFlushPending());

  autovector<MemTable*> mems;
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &mems);
  ASSERT_EQ(0, mems.size());

  autovector<MemTable*> to_delete;
  list.current()->Unref(&to_delete);
  ASSERT_EQ(0, to_delete.size());
}
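
// Exercises reads through a MemTableList: the newest value visible at a
// given sequence number should win, across one and then two memtables.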
TEST_F(MemTableListTest, GetTest) {
  // Create MemTableList
  int min_write_buffer_number_to_merge = 2;
  int max_write_buffer_number_to_maintain = 0;
  MemTableList list(min_write_buffer_number_to_merge,
                    max_write_buffer_number_to_maintain);

  SequenceNumber seq = 1;
  std::string value;
  Status s;
  MergeContext merge_context;
  InternalKeyComparator ikey_cmp(options.comparator);
  SequenceNumber max_covering_tombstone_seq = 0;
  autovector<MemTable*> to_delete;

  LookupKey lkey("key1", seq);
  bool found = list.current()->Get(lkey, &value, &s, &merge_context,
                                   &max_covering_tombstone_seq, ReadOptions());
  ASSERT_FALSE(found);

  // Create a MemTable
  InternalKeyComparator cmp(BytewiseComparator());
  auto factory = std::make_shared<SkipListFactory>();
  options.memtable_factory = factory;
  ImmutableCFOptions ioptions(options);

  WriteBufferManager wb(options.db_write_buffer_size);
  MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
                               kMaxSequenceNumber, 0 /* column_family_id */);
  mem->Ref();

  // Write some keys to this memtable.
  mem->Add(++seq, kTypeDeletion, "key1", "");
  mem->Add(++seq, kTypeValue, "key2", "value2");
  mem->Add(++seq, kTypeValue, "key1", "value1");
  mem->Add(++seq, kTypeValue, "key2", "value2.2");

  // Fetch the newly written keys
  merge_context.Clear();
  found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
                   &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(s.ok() && found);
  ASSERT_EQ(value, "value1");

  merge_context.Clear();
  found = mem->Get(LookupKey("key1", 2), &value, &s, &merge_context,
                   &max_covering_tombstone_seq, ReadOptions());
  // MemTable found out that this key is *not* found (at this sequence#)
  ASSERT_TRUE(found && s.IsNotFound());

  merge_context.Clear();
  found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                   &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(s.ok() && found);
  ASSERT_EQ(value, "value2.2");

  ASSERT_EQ(4, mem->num_entries());
  ASSERT_EQ(1, mem->num_deletes());

  // Add memtable to list
  list.Add(mem, &to_delete);

  SequenceNumber saved_seq = seq;

  // Create another memtable and write some keys to it
  WriteBufferManager wb2(options.db_write_buffer_size);
  MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
                                kMaxSequenceNumber, 0 /* column_family_id */);
  mem2->Ref();

  mem2->Add(++seq, kTypeDeletion, "key1", "");
  mem2->Add(++seq, kTypeValue, "key2", "value2.3");

  // Add second memtable to list
  list.Add(mem2, &to_delete);

  // Fetch keys via MemTableList
  merge_context.Clear();
  found =
      list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context,
                          &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(found && s.IsNotFound());

  merge_context.Clear();
  found = list.current()->Get(LookupKey("key1", saved_seq), &value, &s,
                              &merge_context, &max_covering_tombstone_seq,
                              ReadOptions());
  ASSERT_TRUE(s.ok() && found);
  ASSERT_EQ("value1", value);

  merge_context.Clear();
  found =
      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                          &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(s.ok() && found);
  ASSERT_EQ(value, "value2.3");

  merge_context.Clear();
  found = list.current()->Get(LookupKey("key2", 1), &value, &s, &merge_context,
                              &max_covering_tombstone_seq, ReadOptions());
  ASSERT_FALSE(found);

  ASSERT_EQ(2, list.NumNotFlushed());

  list.current()->Unref(&to_delete);
  for (MemTable* m : to_delete) {
    delete m;
  }
}
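
// Same read pattern as GetTest, but with max_write_buffer_number_to_maintain
// set to 2 so flushed memtables remain available as history.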
|
|
|
|
|
|
|
|
TEST_F(MemTableListTest, GetFromHistoryTest) {
|
|
|
|
// Create MemTableList
|
|
|
|
int min_write_buffer_number_to_merge = 2;
|
|
|
|
int max_write_buffer_number_to_maintain = 2;
|
|
|
|
MemTableList list(min_write_buffer_number_to_merge,
|
|
|
|
max_write_buffer_number_to_maintain);
|
2015-04-10 03:01:11 +02:00
|
|
|
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
SequenceNumber seq = 1;
|
|
|
|
std::string value;
|
|
|
|
Status s;
|
|
|
|
MergeContext merge_context;
|
2016-11-04 02:40:23 +01:00
|
|
|
InternalKeyComparator ikey_cmp(options.comparator);
|
Use only "local" range tombstones during Get (#4449)
Summary:
Previously, range tombstones were accumulated from every level, which
was necessary if a range tombstone in a higher level covered a key in a lower
level. However, RangeDelAggregator::AddTombstones's complexity is based on
the number of tombstones that are currently stored in it, which is wasteful in
the Get case, where we only need to know the highest sequence number of range
tombstones that cover the key from higher levels, and compute the highest covering
sequence number at the current level. This change introduces this optimization, and
removes the use of RangeDelAggregator from the Get path.
In the benchmark results, the following command was used to initialize the database:
```
./db_bench -db=/dev/shm/5k-rts -use_existing_db=false -benchmarks=filluniquerandom -write_buffer_size=1048576 -compression_type=lz4 -target_file_size_base=1048576 -max_bytes_for_level_base=4194304 -value_size=112 -key_size=16 -block_size=4096 -level_compaction_dynamic_level_bytes=true -num=5000000 -max_background_jobs=12 -benchmark_write_rate_limit=20971520 -range_tombstone_width=100 -writes_per_range_tombstone=100 -max_num_range_tombstones=50000 -bloom_bits=8
```
...and the following command was used to measure read throughput:
```
./db_bench -db=/dev/shm/5k-rts/ -use_existing_db=true -benchmarks=readrandom -disable_auto_compactions=true -num=5000000 -reads=100000 -threads=32
```
The filluniquerandom command was only run once, and the resulting database was used
to measure read performance before and after the PR. Both binaries were compiled with
`DEBUG_LEVEL=0`.
Readrandom results before PR:
```
readrandom : 4.544 micros/op 220090 ops/sec; 16.9 MB/s (63103 of 100000 found)
```
Readrandom results after PR:
```
readrandom : 11.147 micros/op 89707 ops/sec; 6.9 MB/s (63103 of 100000 found)
```
So it's actually slower right now, but this PR paves the way for future optimizations (see #4493).
----
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4449
Differential Revision: D10370575
Pulled By: abhimadan
fbshipit-source-id: 9a2e152be1ef36969055c0e9eb4beb0d96c11f4d
2018-10-24 21:29:29 +02:00
|
|
|
SequenceNumber max_covering_tombstone_seq = 0;
|
2015-04-10 03:01:11 +02:00
|
|
|
autovector<MemTable*> to_delete;
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
|
|
|
|
LookupKey lkey("key1", seq);
bool found = list.current()->Get(lkey, &value, &s, &merge_context,
                                 &max_covering_tombstone_seq, ReadOptions());
ASSERT_FALSE(found);

// Create a MemTable
InternalKeyComparator cmp(BytewiseComparator());
auto factory = std::make_shared<SkipListFactory>();
options.memtable_factory = factory;
ImmutableCFOptions ioptions(options);

WriteBufferManager wb(options.db_write_buffer_size);
MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
                             kMaxSequenceNumber, 0 /* column_family_id */);
mem->Ref();

// Write some keys to this memtable.
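// (key1 is written as a deletion tombstone; key2 is written twice, so the
// later value, "value2.2", should win on lookup.)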
mem->Add(++seq, kTypeDeletion, "key1", "");
mem->Add(++seq, kTypeValue, "key2", "value2");
mem->Add(++seq, kTypeValue, "key2", "value2.2");

// Fetch the newly written keys
merge_context.Clear();
found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
                 &max_covering_tombstone_seq, ReadOptions());
// The memtable finds key1, but the entry is a deletion tombstone, so the
// lookup reports NotFound (at this sequence#).
ASSERT_TRUE(found && s.IsNotFound());

merge_context.Clear();
found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                 &max_covering_tombstone_seq, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ(value, "value2.2");

// Add memtable to list
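// (Add() takes over our reference to mem; any memtables that become
// unreferenced and can be freed are returned through to_delete.)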
list.Add(mem, &to_delete);
ASSERT_EQ(0, to_delete.size());

// Fetch keys via MemTableList
merge_context.Clear();
found =
    list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context,
                        &max_covering_tombstone_seq, ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());

merge_context.Clear();
found =
    list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                        &max_covering_tombstone_seq, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ("value2.2", value);

// Flush this memtable from the list.
// (It will then be a part of the memtable history).
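// PickMemtablesToFlush marks every eligible immutable memtable (here, just
// mem) as pending flush and appends it to to_flush.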
autovector<MemTable*> to_flush;
list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
ASSERT_EQ(1, to_flush.size());

MutableCFOptions mutable_cf_options(options);
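// Simulate a successful flush with the test's mock installer: the flushed
// memtable is moved out of the unflushed list and into the version's history.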
s = Mock_InstallMemtableFlushResults(&list, mutable_cf_options, to_flush,
                                     &to_delete);
ASSERT_OK(s);
ASSERT_EQ(0, list.NumNotFlushed());
ASSERT_EQ(1, list.NumFlushed());
ASSERT_EQ(0, to_delete.size());
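
// Note that nothing was freed: the flushed memtable is retained in the
// history so that transactions can still check it for write conflicts.
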
// Verify keys are no longer in MemTableList
merge_context.Clear();
found =
    list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context,
                        &max_covering_tombstone_seq, ReadOptions());
ASSERT_FALSE(found);

merge_context.Clear();
found =
    list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                        &max_covering_tombstone_seq, ReadOptions());
ASSERT_FALSE(found);

// Verify keys are present in history
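// GetFromHistory searches only the flushed-but-retained memtables; this is
// the lookup path used to validate transactions against recently flushed data.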
merge_context.Clear();
found = list.current()->GetFromHistory(
    LookupKey("key1", seq), &value, &s, &merge_context,
    &max_covering_tombstone_seq, ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());

merge_context.Clear();
found = list.current()->GetFromHistory(
    LookupKey("key2", seq), &value, &s, &merge_context,
    &max_covering_tombstone_seq, ReadOptions());
ASSERT_TRUE(found);
ASSERT_EQ("value2.2", value);

// Create another memtable and write some keys to it
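// (key1 is deleted once more and key3 is added; after flushing, this
// memtable becomes the second entry in the history.)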
WriteBufferManager wb2(options.db_write_buffer_size);
MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
                              kMaxSequenceNumber, 0 /* column_family_id */);
mem2->Ref();

mem2->Add(++seq, kTypeDeletion, "key1", "");
mem2->Add(++seq, kTypeValue, "key3", "value3");

// Add second memtable to list
list.Add(mem2, &to_delete);
ASSERT_EQ(0, to_delete.size());

to_flush.clear();
list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
ASSERT_EQ(1, to_flush.size());

// Flush second memtable
s = Mock_InstallMemtableFlushResults(&list, mutable_cf_options, to_flush,
                                     &to_delete);
ASSERT_OK(s);
ASSERT_EQ(0, list.NumNotFlushed());
ASSERT_EQ(2, list.NumFlushed());
ASSERT_EQ(0, to_delete.size());

// Add a third memtable to push the first memtable out of the history
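// The history can hold only a limited number of memtables, so adding mem3
// trims the oldest flushed memtable; that is why to_delete gains one entry.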
WriteBufferManager wb3(options.db_write_buffer_size);
MemTable* mem3 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb3,
                              kMaxSequenceNumber, 0 /* column_family_id */);
mem3->Ref();
list.Add(mem3, &to_delete);
ASSERT_EQ(1, list.NumNotFlushed());
ASSERT_EQ(1, list.NumFlushed());
ASSERT_EQ(1, to_delete.size());

// Verify keys are no longer in MemTableList
merge_context.Clear();
found =
|
|
|
|
list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context,
|
|
|
|
&max_covering_tombstone_seq, ReadOptions());
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
ASSERT_FALSE(found);

  merge_context.Clear();
  found =
      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                          &max_covering_tombstone_seq, ReadOptions());
  ASSERT_FALSE(found);

  merge_context.Clear();
  found =
      list.current()->Get(LookupKey("key3", seq), &value, &s, &merge_context,
                          &max_covering_tombstone_seq, ReadOptions());
  ASSERT_FALSE(found);
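
  // All three lookups miss because Get() only consults memtables that have
  // not been flushed yet; the flushed memtables retained for history are
  // only reachable through GetFromHistory() below.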

  // Verify that the second memtable's keys are in the history
  merge_context.Clear();
  found = list.current()->GetFromHistory(
      LookupKey("key1", seq), &value, &s, &merge_context,
      &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(found && s.IsNotFound());
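  // found == true combined with Status::NotFound means the history holds a
  // deletion tombstone for key1, which is different from the key being
  // absent altogether (found == false). A sketch of how a caller would
  // typically branch on this pair:
  //   if (found) {
  //     if (s.IsNotFound()) { /* key was deleted */ }
  //     else                { /* value holds the result */ }
  //   } else { /* key never seen in the retained memtables */ }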

  merge_context.Clear();
  found = list.current()->GetFromHistory(
      LookupKey("key3", seq), &value, &s, &merge_context,
      &max_covering_tombstone_seq, ReadOptions());
  ASSERT_TRUE(found);
  ASSERT_EQ("value3", value);

  // Verify that key2 from the first memtable is no longer in the history
  merge_context.Clear();
  found =
      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context,
                          &max_covering_tombstone_seq, ReadOptions());
  ASSERT_FALSE(found);

  // Cleanup
  list.current()->Unref(&to_delete);
  ASSERT_EQ(3, to_delete.size());
  for (MemTable* m : to_delete) {
    delete m;
  }
}
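
// FlushPendingTest exercises the two signals MemTableList exposes for flush
// scheduling: IsFlushPending(), which is true once enough immutable memtables
// have accumulated (or FlushRequested() was called) and none of them have
// been picked yet, and the imm_flush_needed atomic, which tells the DB that a
// flush should be scheduled. A minimal sketch of the consumer side, assuming
// a hypothetical background loop (ScheduleFlush() is illustrative only, not
// part of this test):
//   while (running) {
//     if (list.imm_flush_needed.load(std::memory_order_acquire)) {
//       ScheduleFlush();  // would eventually call PickMemtablesToFlush()
//     }
//   }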

TEST_F(MemTableListTest, FlushPendingTest) {
  const int num_tables = 6;
  SequenceNumber seq = 1;
  Status s;

  auto factory = std::make_shared<SkipListFactory>();
  options.memtable_factory = factory;
  ImmutableCFOptions ioptions(options);
  InternalKeyComparator cmp(BytewiseComparator());
  WriteBufferManager wb(options.db_write_buffer_size);
  autovector<MemTable*> to_delete;

  // Create MemTableList
  int min_write_buffer_number_to_merge = 3;
  int max_write_buffer_number_to_maintain = 7;
  MemTableList list(min_write_buffer_number_to_merge,
                    max_write_buffer_number_to_maintain);
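  // With min_write_buffer_number_to_merge == 3, a flush does not become
  // pending on its own until three immutable memtables have accumulated
  // (FlushRequested() can force it earlier), while
  // max_write_buffer_number_to_maintain == 7 caps how many flushed plus
  // unflushed memtables are kept alive at once.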

  // Create some MemTables
  uint64_t memtable_id = 0;
  std::vector<MemTable*> tables;
  MutableCFOptions mutable_cf_options(options);
  for (int i = 0; i < num_tables; i++) {
    MemTable* mem = new MemTable(cmp, ioptions, mutable_cf_options, &wb,
                                 kMaxSequenceNumber, 0 /* column_family_id */);
    mem->SetID(memtable_id++);
    mem->Ref();

    std::string value;
    MergeContext merge_context;

    mem->Add(++seq, kTypeValue, "key1", ToString(i));
    mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN");
    mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value");
    mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM");
    mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "");

    tables.push_back(mem);
  }
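
  // Each memtable above holds four puts plus a deletion: keyX<i> is written
  // and then deleted at a higher sequence number.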

  // Nothing to flush
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
  autovector<MemTable*> to_flush;
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
  ASSERT_EQ(0, to_flush.size());

  // Request a flush even though there is nothing to flush
  list.FlushRequested();
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Attempt to 'flush' to clear request for flush
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
  ASSERT_EQ(0, to_flush.size());
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Request a flush again
  list.FlushRequested();
  // No flush pending since the list is empty.
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Add 2 tables
  list.Add(tables[0], &to_delete);
  list.Add(tables[1], &to_delete);
  ASSERT_EQ(2, list.NumNotFlushed());
  ASSERT_EQ(0, to_delete.size());

  // Even though we have less than the minimum to flush, a flush is
  // pending since we had previously requested a flush and never called
  // PickMemtablesToFlush() to clear the flush.
  ASSERT_TRUE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Pick tables to flush
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
  ASSERT_EQ(2, to_flush.size());
  ASSERT_EQ(2, list.NumNotFlushed());
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Revert flush
  list.RollbackMemtableFlush(to_flush, 0);
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
  to_flush.clear();
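
  // After the rollback the two memtables are back in the list and
  // imm_flush_needed is raised again so the flush gets rescheduled, but
  // IsFlushPending() stays false: the earlier explicit flush request was
  // already consumed by PickMemtablesToFlush().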

  // Add another table
  list.Add(tables[2], &to_delete);
  // We now have the minimum to flush regardless of whether FlushRequested()
  // was called.
  ASSERT_TRUE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
  ASSERT_EQ(0, to_delete.size());

  // Pick tables to flush
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
  ASSERT_EQ(3, to_flush.size());
  ASSERT_EQ(3, list.NumNotFlushed());
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Pick tables to flush again
  autovector<MemTable*> to_flush2;
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush2);
  ASSERT_EQ(0, to_flush2.size());
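  // Nothing is picked the second time: all three immutable memtables are
  // still held by the outstanding pick in to_flush, and a memtable can only
  // belong to one in-flight flush at a time.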
  ASSERT_EQ(3, list.NumNotFlushed());
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Add another table
  list.Add(tables[3], &to_delete);
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
  ASSERT_EQ(0, to_delete.size());

  // Request a flush again
  list.FlushRequested();
  ASSERT_TRUE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Pick tables to flush again
  list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush2);
  ASSERT_EQ(1, to_flush2.size());
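  // Only the newly added tables[3] is available to pick; tables[0..2] are
  // still out in the first pick (to_flush), which has not been rolled back
  // or committed yet.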
  ASSERT_EQ(4, list.NumNotFlushed());
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Rollback first pick of tables
  list.RollbackMemtableFlush(to_flush, 0);
  ASSERT_TRUE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
  to_flush.clear();

  // Add another table
  list.Add(tables[4], &to_delete);
  ASSERT_EQ(5, list.NumNotFlushed());
  // We now have the minimum to flush regardless of whether FlushRequested()
  // was called.
  ASSERT_TRUE(list.IsFlushPending());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
ASSERT_EQ(0, to_delete.size());
|
2015-04-10 03:01:11 +02:00
|
|
|
|
|
|
|
// Pick tables to flush
|
2018-10-16 04:59:20 +02:00
|
|
|
list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush);
|
2015-04-10 03:01:11 +02:00
|
|
|
// Should pick 4 of 5 since 1 table has been picked in to_flush2
|
|
|
|
ASSERT_EQ(4, to_flush.size());
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
ASSERT_EQ(5, list.NumNotFlushed());
|
2015-04-10 03:01:11 +02:00
|
|
|
ASSERT_FALSE(list.IsFlushPending());
|
|
|
|
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
|
|
|
|
|
|
|
|
// Pick tables to flush again
|
|
|
|
autovector<MemTable*> to_flush3;
|
2018-10-16 04:59:20 +02:00
|
|
|
list.PickMemtablesToFlush(nullptr /* memtable_id */, &to_flush3);
|
2015-04-10 03:01:11 +02:00
|
|
|
ASSERT_EQ(0, to_flush3.size()); // nothing not in progress of being flushed
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
ASSERT_EQ(5, list.NumNotFlushed());
|
2015-04-10 03:01:11 +02:00
|
|
|
ASSERT_FALSE(list.IsFlushPending());
|
|
|
|
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
|
|
|
|
|
|
|
|
// Flush the 4 memtables that were picked in to_flush
|
2018-10-16 04:59:20 +02:00
|
|
|
s = Mock_InstallMemtableFlushResults(&list, mutable_cf_options, to_flush,
|
|
|
|
&to_delete);
|
2015-04-10 03:01:11 +02:00
|
|
|
ASSERT_OK(s);
|
|
|
|
|
|
|
|
// Note: now to_flush contains tables[0,1,2,4]. to_flush2 contains
|
|
|
|
// tables[3].
|
|
|
|
// Current implementation will only commit memtables in the order they were
|
2018-10-06 00:37:45 +02:00
|
|
|
// created. So TryInstallMemtableFlushResults will install the first 3 tables
|
2015-04-10 03:01:11 +02:00
|
|
|
// in to_flush and stop when it encounters a table not yet flushed.
|
Support saving history in memtable_list
Summary:
For transactions, we are using the memtables to validate that there are no write conflicts. But after flushing, we don't have any memtables, and transactions could fail to commit. So we want to someone keep around some extra history to use for conflict checking. In addition, we want to provide a way to increase the size of this history if too many transactions fail to commit.
After chatting with people, it seems like everyone prefers just using Memtables to store this history (instead of a separate history structure). It seems like the best place for this is abstracted inside the memtable_list. I decide to create a separate list in MemtableListVersion as using the same list complicated the flush/installalflushresults logic too much.
This diff adds a new parameter to control how much memtable history to keep around after flushing. However, it sounds like people aren't too fond of adding new parameters. So I am making the default size of flushed+not-flushed memtables be set to max_write_buffers. This should not change the maximum amount of memory used, but make it more likely we're using closer the the limit. (We are now postponing deleting flushed memtables until the max_write_buffer limit is reached). So while we might use more memory on average, we are still obeying the limit set (and you could argue it's better to go ahead and use up memory now instead of waiting for a write stall to happen to test this limit).
However, if people are opposed to this default behavior, we can easily set it to 0 and require this parameter be set in order to use transactions.
Test Plan: Added a xfunc test to play around with setting different values of this parameter in all tests. Added testing in memtablelist_test and planning on adding more testing here.
Reviewers: sdong, rven, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37443
2015-05-29 01:34:24 +02:00
|
|
|
ASSERT_EQ(2, list.NumNotFlushed());
|
|
|
|
int num_in_history = std::min(3, max_write_buffer_number_to_maintain);
|
|
|
|
ASSERT_EQ(num_in_history, list.NumFlushed());
|
|
|
|
ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
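  // Accounting: of the 5 memtables, 2 are still unflushed; of the 3 just
  // committed, up to max_write_buffer_number_to_maintain are kept as flushed
  // history, and any overflow is handed back in to_delete.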

  // Request a flush again. There should be nothing to flush.
  list.FlushRequested();
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

  // Flush the 1 memtable that was picked in to_flush2
  s = MemTableListTest::Mock_InstallMemtableFlushResults(
      &list, mutable_cf_options, to_flush2, &to_delete);
  ASSERT_OK(s);

  // This will actually install 2 tables. The 1 we told it to flush, and also
  // tables[4] which has been waiting for tables[3] to commit.
  ASSERT_EQ(0, list.NumNotFlushed());
  num_in_history = std::min(5, max_write_buffer_number_to_maintain);
  ASSERT_EQ(num_in_history, list.NumFlushed());
  ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
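  // Now all 5 memtables have been committed, so to_delete receives whatever
  // the capped flushed history (max_write_buffer_number_to_maintain) cannot
  // hold.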

  for (const auto& m : to_delete) {
    // Refcount should be 0 after calling TryInstallMemtableFlushResults.
    // Verify this, by Ref'ing then UnRef'ing:
    m->Ref();
    ASSERT_EQ(m, m->Unref());
    delete m;
  }
  to_delete.clear();

  // Add another table
  list.Add(tables[5], &to_delete);
  ASSERT_EQ(1, list.NumNotFlushed());
  ASSERT_EQ(5, list.GetLatestMemTableID());
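  // Memtable IDs are assigned in creation order, so tables[5] (ID 5) is the
  // newest entry in the list.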
  memtable_id = 4;
  // Pick tables to flush. The tables to pick must have ID smaller than or
  // equal to 4. Therefore, no table will be selected in this case.
  autovector<MemTable*> to_flush4;
  list.FlushRequested();
  ASSERT_TRUE(list.HasFlushRequested());
  list.PickMemtablesToFlush(&memtable_id, &to_flush4);
  ASSERT_TRUE(to_flush4.empty());
  ASSERT_EQ(1, list.NumNotFlushed());
  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
  ASSERT_FALSE(list.IsFlushPending());
  ASSERT_FALSE(list.HasFlushRequested());
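  // Even though nothing qualified, the pick attempt consumed the flush
  // request; imm_flush_needed stays set because an unpicked immutable
  // memtable (tables[5]) remains.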

  // Pick tables to flush. The tables to pick must have ID smaller than or
  // equal to 5. Therefore, only tables[5] will be selected.
  memtable_id = 5;
  list.FlushRequested();
  list.PickMemtablesToFlush(&memtable_id, &to_flush4);
  ASSERT_EQ(1, static_cast<int>(to_flush4.size()));
  ASSERT_EQ(1, list.NumNotFlushed());
  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
  ASSERT_FALSE(list.IsFlushPending());
  to_delete.clear();

  list.current()->Unref(&to_delete);
  int to_delete_size =
      std::min(num_tables, max_write_buffer_number_to_maintain);
  ASSERT_EQ(to_delete_size, to_delete.size());
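  // Dropping the last reference to the current version releases everything it
  // still holds. The list keeps at most max_write_buffer_number_to_maintain
  // memtables in total (unflushed plus flushed history), hence the min().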

  for (const auto& m : to_delete) {
    // Refcount should be 0 after calling TryInstallMemtableFlushResults.
    // Verify this, by Ref'ing then UnRef'ing:
    m->Ref();
    ASSERT_EQ(m, m->Unref());
    delete m;
  }
  to_delete.clear();
}

TEST_F(MemTableListTest, EmptyAtomicFlushTest) {
  autovector<MemTableList*> lists;
  autovector<uint32_t> cf_ids;
  autovector<const MutableCFOptions*> options_list;
  autovector<const autovector<MemTable*>*> to_flush;
  autovector<MemTable*> to_delete;
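  // With no lists, no column families, and nothing to flush, installing
  // atomic flush results should be a no-op that succeeds and frees nothing.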
  Status s = Mock_InstallMemtableAtomicFlushResults(lists, cf_ids, options_list,
                                                    to_flush, &to_delete);
  ASSERT_OK(s);
  ASSERT_TRUE(to_delete.empty());
}

TEST_F(MemTableListTest, AtomicFlushTest) {
  const int num_cfs = 3;
  const int num_tables_per_cf = 2;
  SequenceNumber seq = 1;

  auto factory = std::make_shared<SkipListFactory>();
  options.memtable_factory = factory;
  ImmutableCFOptions ioptions(options);
  InternalKeyComparator cmp(BytewiseComparator());
  WriteBufferManager wb(options.db_write_buffer_size);

  // Create MemTableLists
  int min_write_buffer_number_to_merge = 3;
  int max_write_buffer_number_to_maintain = 7;
  autovector<MemTableList*> lists;
  for (int i = 0; i != num_cfs; ++i) {
    lists.emplace_back(new MemTableList(min_write_buffer_number_to_merge,
                                        max_write_buffer_number_to_maintain));
  }

  autovector<uint32_t> cf_ids;
  std::vector<std::vector<MemTable*>> tables(num_cfs);
  autovector<const MutableCFOptions*> mutable_cf_options_list;
  uint32_t cf_id = 0;
  for (auto& elem : tables) {
    mutable_cf_options_list.emplace_back(new MutableCFOptions(options));
    uint64_t memtable_id = 0;
    for (int i = 0; i != num_tables_per_cf; ++i) {
      MemTable* mem =
          new MemTable(cmp, ioptions, *(mutable_cf_options_list.back()), &wb,
                       kMaxSequenceNumber, cf_id);
      mem->SetID(memtable_id++);
      mem->Ref();

      std::string value;
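      // Populate the memtable. Each Add consumes the next sequence number;
      // the final kTypeDeletion entry shadows the earlier keyX value within
      // this memtable.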
      mem->Add(++seq, kTypeValue, "key1", ToString(i));
      mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN");
      mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value");
      mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM");
      mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "");

      elem.push_back(mem);
    }
    cf_ids.push_back(cf_id++);
  }

  std::vector<autovector<MemTable*>> flush_candidates(num_cfs);

  // Nothing to flush
  for (auto i = 0; i != num_cfs; ++i) {
    auto* list = lists[i];
    ASSERT_FALSE(list->IsFlushPending());
    ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
    list->PickMemtablesToFlush(nullptr /* memtable_id */, &flush_candidates[i]);
    ASSERT_EQ(0, flush_candidates[i].size());
  }
  // Request flush even though there is nothing to flush
  for (auto i = 0; i != num_cfs; ++i) {
    auto* list = lists[i];
    list->FlushRequested();
    ASSERT_FALSE(list->IsFlushPending());
    ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
  }
  autovector<MemTable*> to_delete;
  // Add tables to the immutable memtable lists associated with column families
  for (auto i = 0; i != num_cfs; ++i) {
    for (auto j = 0; j != num_tables_per_cf; ++j) {
      lists[i]->Add(tables[i][j], &to_delete);
    }
    ASSERT_EQ(num_tables_per_cf, lists[i]->NumNotFlushed());
    ASSERT_TRUE(lists[i]->IsFlushPending());
    ASSERT_TRUE(lists[i]->imm_flush_needed.load(std::memory_order_acquire));
  }
  std::vector<uint64_t> flush_memtable_ids = {1, 1, 0};
  //          +----+
  // list[0]: |0 1|
  // list[1]: |0 1|
  //          | +--+
  // list[2]: |0| 1
  //          +-+
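  // Each entry of flush_memtable_ids is an inclusive upper bound on the
  // memtable IDs that may be picked for that list, so lists 0 and 1 pick
  // memtables {0, 1} while list 2 picks only {0}.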
  // Pick memtables to flush
  for (auto i = 0; i != num_cfs; ++i) {
    flush_candidates[i].clear();
    lists[i]->PickMemtablesToFlush(&flush_memtable_ids[i],
                                   &flush_candidates[i]);
    ASSERT_EQ(flush_memtable_ids[i] - 0 + 1,
              static_cast<uint64_t>(flush_candidates[i].size()));
  }
  autovector<MemTableList*> tmp_lists;
  autovector<uint32_t> tmp_cf_ids;
  autovector<const MutableCFOptions*> tmp_options_list;
  autovector<const autovector<MemTable*>*> to_flush;
  for (auto i = 0; i != num_cfs; ++i) {
    if (!flush_candidates[i].empty()) {
      to_flush.push_back(&flush_candidates[i]);
      tmp_lists.push_back(lists[i]);
      tmp_cf_ids.push_back(i);
      tmp_options_list.push_back(mutable_cf_options_list[i]);
    }
  }
  Status s = Mock_InstallMemtableAtomicFlushResults(
      tmp_lists, tmp_cf_ids, tmp_options_list, to_flush, &to_delete);
  ASSERT_OK(s);
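  // The mock helper installs the picked memtables of all participating column
  // families as a single atomic flush result.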

  for (auto i = 0; i != num_cfs; ++i) {
    for (auto j = 0; j != num_tables_per_cf; ++j) {
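      // Memtables covered by the atomic flush should now carry the (nonzero)
      // file number of the SST they were flushed to.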
      if (static_cast<uint64_t>(j) <= flush_memtable_ids[i]) {
        ASSERT_LT(0, tables[i][j]->GetFileNumber());
      }
    }
    ASSERT_EQ(
        static_cast<size_t>(num_tables_per_cf) - flush_candidates[i].size(),
        lists[i]->NumNotFlushed());
  }

  to_delete.clear();
  for (auto list : lists) {
    list->current()->Unref(&to_delete);
    delete list;
  }
  for (auto& mutable_cf_options : mutable_cf_options_list) {
    if (mutable_cf_options != nullptr) {
      delete mutable_cf_options;
      mutable_cf_options = nullptr;
    }
  }
  // All memtables in the tables array must have been flushed or released by
  // now, and are thus ready to be deleted.
  ASSERT_EQ(to_delete.size(), tables.size() * tables.front().size());
  for (const auto& m : to_delete) {
    // Refcount should be 0 after calling InstallMemtableFlushResults.
    // Verify this by Ref'ing and then Unref'ing.
    m->Ref();
    ASSERT_EQ(m, m->Unref());
    delete m;
  }
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}