// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/inlineskiplist.h"
#include <set>
#include "rocksdb/env.h"
#include "util/concurrent_arena.h"
#include "util/hash.h"
#include "util/random.h"
#include "util/testharness.h"

namespace rocksdb {

// Our test skip list stores 8-byte unsigned integers
typedef uint64_t Key;

static const char* Encode(const uint64_t* key) {
  return reinterpret_cast<const char*>(key);
}

static Key Decode(const char* key) {
  Key rv;
  memcpy(&rv, key, sizeof(Key));
  return rv;
}

struct TestComparator {
  int operator()(const char* a, const char* b) const {
    if (Decode(a) < Decode(b)) {
      return -1;
    } else if (Decode(a) > Decode(b)) {
      return +1;
    } else {
      return 0;
    }
  }
};

class InlineSkipTest : public testing::Test {};

TEST_F(InlineSkipTest, Empty) {
  Arena arena;
  TestComparator cmp;
  InlineSkipList<TestComparator> list(cmp, &arena);
  Key key = 10;
  ASSERT_TRUE(!list.Contains(Encode(&key)));

  InlineSkipList<TestComparator>::Iterator iter(&list);
  ASSERT_TRUE(!iter.Valid());
  iter.SeekToFirst();
  ASSERT_TRUE(!iter.Valid());
  key = 100;
  iter.Seek(Encode(&key));
  ASSERT_TRUE(!iter.Valid());
  iter.SeekToLast();
  ASSERT_TRUE(!iter.Valid());
}

TEST_F(InlineSkipTest, InsertAndLookup) {
  const int N = 2000;
  const int R = 5000;
  Random rnd(1000);
  std::set<Key> keys;
  ConcurrentArena arena;
  TestComparator cmp;
  InlineSkipList<TestComparator> list(cmp, &arena);
  for (int i = 0; i < N; i++) {
    Key key = rnd.Next() % R;
    if (keys.insert(key).second) {
      char* buf = list.AllocateKey(sizeof(Key));
      memcpy(buf, &key, sizeof(Key));
      list.Insert(buf);
    }
  }

  for (Key i = 0; i < R; i++) {
    if (list.Contains(Encode(&i))) {
      ASSERT_EQ(keys.count(i), 1U);
    } else {
      ASSERT_EQ(keys.count(i), 0U);
    }
  }

  // Simple iterator tests
  {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    ASSERT_TRUE(!iter.Valid());

    uint64_t zero = 0;
    iter.Seek(Encode(&zero));
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));

    iter.SeekToFirst();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));

    iter.SeekToLast();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
  }

  // Forward iteration test
  for (Key i = 0; i < R; i++) {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.Seek(Encode(&i));

    // Compare against model iterator
    std::set<Key>::iterator model_iter = keys.lower_bound(i);
    for (int j = 0; j < 3; j++) {
      if (model_iter == keys.end()) {
        ASSERT_TRUE(!iter.Valid());
        break;
      } else {
        ASSERT_TRUE(iter.Valid());
        ASSERT_EQ(*model_iter, Decode(iter.key()));
        ++model_iter;
        iter.Next();
      }
    }
  }

  // Backward iteration test
  {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.SeekToLast();

    // Compare against model iterator
    for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
         model_iter != keys.rend(); ++model_iter) {
      ASSERT_TRUE(iter.Valid());
      ASSERT_EQ(*model_iter, Decode(iter.key()));
      iter.Prev();
    }
    ASSERT_TRUE(!iter.Valid());
  }
}

// We want to make sure that with a single writer and multiple
// concurrent readers (with no synchronization other than when a
// reader's iterator is created), the reader always observes all the
// data that was present in the skip list when the iterator was
// constructed.  Because insertions are happening concurrently, we may
// also observe new values that were inserted since the iterator was
// constructed, but we should never miss any values that were present
// at iterator construction time.
//
// We generate multi-part keys:
//     <key,gen,hash>
// where:
//     key is in range [0..K-1]
//     gen is a generation number for key
//     hash is hash(key,gen)
//
// The insertion code picks a random key, sets gen to be 1 + the last
// generation number inserted for that key, and sets hash to Hash(key,gen).
//
// At the beginning of a read, we snapshot the last inserted
// generation number for each key.  We then iterate, including random
// calls to Next() and Seek().  For every key we encounter, we
// check that it is either expected given the initial snapshot or has
// been concurrently added since the iterator started.
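//
// As a worked illustration of the layout (example values chosen here for
// exposition, not taken from the test): MakeKey(3, 7) below evaluates to
//     (3 << 40) | (7 << 8) | (HashNumbers(3, 7) & 0xff)
// so key() recovers 3 from bits 40..63, gen() recovers 7 from bits 8..39,
// and hash() returns the low byte, which is exactly what IsValidKey()
// re-computes and checks whenever a reader decodes a key from the list.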
class ConcurrentTest {
 public:
  static const uint32_t K = 8;

 private:
  static uint64_t key(Key key) { return (key >> 40); }
  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
  static uint64_t hash(Key key) { return key & 0xff; }

  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
    uint64_t data[2] = {k, g};
    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
  }

  static Key MakeKey(uint64_t k, uint64_t g) {
    assert(sizeof(Key) == sizeof(uint64_t));
    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
    assert(g <= 0xffffffffu);
    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
  }

  static bool IsValidKey(Key k) {
    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
  }

  static Key RandomTarget(Random* rnd) {
    switch (rnd->Next() % 10) {
      case 0:
        // Seek to beginning
        return MakeKey(0, 0);
      case 1:
        // Seek to end
        return MakeKey(K, 0);
      default:
        // Seek to middle
        return MakeKey(rnd->Next() % K, 0);
    }
  }

  // Per-key generation
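  // Set() publishes with release ordering and Get() reads with acquire
  // ordering, so once a writer stores generation g for a key after inserting
  // it, a reader that observes g in its snapshot also observes the insert.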
  struct State {
    std::atomic<int> generation[K];
    void Set(int k, int v) {
      generation[k].store(v, std::memory_order_release);
    }
    int Get(int k) { return generation[k].load(std::memory_order_acquire); }

    State() {
      for (unsigned int k = 0; k < K; k++) {
        Set(k, 0);
      }
    }
  };

  // Current state of the test
  State current_;

  ConcurrentArena arena_;

  // InlineSkipList is not protected by mu_.  We just use a single writer
  // thread to modify it.
  InlineSkipList<TestComparator> list_;

 public:
  ConcurrentTest() : list_(TestComparator(), &arena_) {}

  // REQUIRES: No concurrent calls to WriteStep or ConcurrentWriteStep
  void WriteStep(Random* rnd) {
    const uint32_t k = rnd->Next() % K;
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    list_.Insert(buf);
    current_.Set(k, g);
  }

  // REQUIRES: No concurrent calls for the same k
  void ConcurrentWriteStep(uint32_t k) {
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    list_.InsertConcurrently(buf);
    ASSERT_EQ(g, current_.Get(k) + 1);
    current_.Set(k, g);
  }

  void ReadStep(Random* rnd) {
    // Remember the initial committed state of the skiplist.
    State initial_state;
    for (unsigned int k = 0; k < K; k++) {
      initial_state.Set(k, current_.Get(k));
    }

    Key pos = RandomTarget(rnd);
    InlineSkipList<TestComparator>::Iterator iter(&list_);
    iter.Seek(Encode(&pos));
    while (true) {
      Key current;
      if (!iter.Valid()) {
        current = MakeKey(K, 0);
      } else {
        current = Decode(iter.key());
        ASSERT_TRUE(IsValidKey(current)) << current;
      }
      ASSERT_LE(pos, current) << "should not go backwards";

      // Verify that everything in [pos,current) was not present in
      // initial_state.
      while (pos < current) {
        ASSERT_LT(key(pos), K) << pos;

        // Note that generation 0 is never inserted, so it is ok if
        // <*,0,*> is missing.
        ASSERT_TRUE((gen(pos) == 0U) ||
                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
                                    static_cast<int>(key(pos))))))
            << "key: " << key(pos) << "; gen: " << gen(pos)
            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));

        // Advance to next key in the valid key space
        if (key(pos) < key(current)) {
          pos = MakeKey(key(pos) + 1, 0);
        } else {
          pos = MakeKey(key(pos), gen(pos) + 1);
        }
      }

      if (!iter.Valid()) {
        break;
      }

      if (rnd->Next() % 2) {
        iter.Next();
        pos = MakeKey(key(pos), gen(pos) + 1);
      } else {
        Key new_target = RandomTarget(rnd);
        if (new_target > pos) {
          pos = new_target;
          iter.Seek(Encode(&new_target));
        }
      }
    }
  }
};
const uint32_t ConcurrentTest::K;

// Simple test that does single-threaded testing of the ConcurrentTest
// scaffolding.
TEST_F(InlineSkipTest, ConcurrentReadWithoutThreads) {
  ConcurrentTest test;
  Random rnd(test::RandomSeed());
  for (int i = 0; i < 10000; i++) {
    test.ReadStep(&rnd);
    test.WriteStep(&rnd);
  }
}

TEST_F(InlineSkipTest, ConcurrentInsertWithoutThreads) {
  ConcurrentTest test;
  Random rnd(test::RandomSeed());
  for (int i = 0; i < 10000; i++) {
    test.ReadStep(&rnd);
    uint32_t base = rnd.Next();
    for (int j = 0; j < 4; ++j) {
      test.ConcurrentWriteStep((base + j) % ConcurrentTest::K);
    }
  }
}
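
// Coordination state shared between test threads: t_ holds the skip list
// under test, state_/state_cv_ track the reader's lifecycle (see
// ConcurrentReader below), quit_flag_ asks the reader to stop, and
// pending_writers_ lets a test wait until a batch of concurrent inserts
// has finished.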
class TestState {
|
|
|
|
public:
|
|
|
|
ConcurrentTest t_;
|
|
|
|
int seed_;
|
|
|
|
std::atomic<bool> quit_flag_;
|
support for concurrent adds to memtable
Summary:
This diff adds support for concurrent adds to the skiplist memtable
implementations. Memory allocation is made thread-safe by the addition of
a spinlock, with small per-core buffers to avoid contention. Concurrent
memtable writes are made via an additional method and don't impose a
performance overhead on the non-concurrent case, so parallelism can be
selected on a per-batch basis.
Write thread synchronization is an increasing bottleneck for higher levels
of concurrency, so this diff adds --enable_write_thread_adaptive_yield
(default off). This feature causes threads joining a write batch
group to spin for a short time (default 100 usec) using sched_yield,
rather than going to sleep on a mutex. If the timing of the yield calls
indicates that another thread has actually run during the yield then
spinning is avoided. This option improves performance for concurrent
situations even without parallel adds, although it has the potential to
increase CPU usage (and the heuristic adaptation is not yet mature).
Parallel writes are not currently compatible with
inplace updates, update callbacks, or delete filtering.
Enable it with --allow_concurrent_memtable_write (and
--enable_write_thread_adaptive_yield). Parallel memtable writes
are performance neutral when there is no actual parallelism, and in
my experiments (SSD server-class Linux and varying contention and key
sizes for fillrandom) they are always a performance win when there is
more than one thread.
Statistics are updated earlier in the write path, dropping the number
of DB mutex acquisitions from 2 to 1 for almost all cases.
This diff was motivated and inspired by Yahoo's cLSM work. It is more
conservative than cLSM: RocksDB's write batch group leader role is
preserved (along with all of the existing flush and write throttling
logic) and concurrent writers are blocked until all memtable insertions
have completed and the sequence number has been advanced, to preserve
linearizability.
My test config is "db_bench -benchmarks=fillrandom -threads=$T
-batch_size=1 -memtablerep=skip_list -value_size=100 --num=1000000/$T
-level0_slowdown_writes_trigger=9999 -level0_stop_writes_trigger=9999
-disable_auto_compactions --max_write_buffer_number=8
-max_background_flushes=8 --disable_wal --write_buffer_size=160000000
--block_size=16384 --allow_concurrent_memtable_write" on a two-socket
Xeon E5-2660 @ 2.2Ghz with lots of memory and an SSD hard drive. With 1
thread I get ~440Kops/sec. Peak performance for 1 socket (numactl
-N1) is slightly more than 1Mops/sec, at 16 threads. Peak performance
across both sockets happens at 30 threads, and is ~900Kops/sec, although
with fewer threads there is less performance loss when the system has
background work.
Test Plan:
1. concurrent stress tests for InlineSkipList and DynamicBloom
2. make clean; make check
3. make clean; DISABLE_JEMALLOC=1 make valgrind_check; valgrind db_bench
4. make clean; COMPILE_WITH_TSAN=1 make all check; db_bench
5. make clean; COMPILE_WITH_ASAN=1 make all check; db_bench
6. make clean; OPT=-DROCKSDB_LITE make check
7. verify no perf regressions when disabled
Reviewers: igor, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, IslamAbdelRahman, anthony, yhchiang, rven, sdong, guyg8, kradhakrishnan, dhruba
Differential Revision: https://reviews.facebook.net/D50589
2015-08-15 01:59:07 +02:00
|
|
|
std::atomic<uint32_t> next_writer_;
|
2015-11-24 22:01:09 +01:00
|
|
|
|
|
|
|
enum ReaderState { STARTING, RUNNING, DONE };
|
|
|
|
|
|
|
|
explicit TestState(int s)
|
support for concurrent adds to memtable
Summary:
This diff adds support for concurrent adds to the skiplist memtable
implementations. Memory allocation is made thread-safe by the addition of
a spinlock, with small per-core buffers to avoid contention. Concurrent
memtable writes are made via an additional method and don't impose a
performance overhead on the non-concurrent case, so parallelism can be
selected on a per-batch basis.
Write thread synchronization is an increasing bottleneck for higher levels
of concurrency, so this diff adds --enable_write_thread_adaptive_yield
(default off). This feature causes threads joining a write batch
group to spin for a short time (default 100 usec) using sched_yield,
rather than going to sleep on a mutex. If the timing of the yield calls
indicates that another thread has actually run during the yield then
spinning is avoided. This option improves performance for concurrent
situations even without parallel adds, although it has the potential to
increase CPU usage (and the heuristic adaptation is not yet mature).
Parallel writes are not currently compatible with
inplace updates, update callbacks, or delete filtering.
Enable it with --allow_concurrent_memtable_write (and
--enable_write_thread_adaptive_yield). Parallel memtable writes
are performance neutral when there is no actual parallelism, and in
my experiments (SSD server-class Linux and varying contention and key
sizes for fillrandom) they are always a performance win when there is
more than one thread.
Statistics are updated earlier in the write path, dropping the number
of DB mutex acquisitions from 2 to 1 for almost all cases.
This diff was motivated and inspired by Yahoo's cLSM work. It is more
conservative than cLSM: RocksDB's write batch group leader role is
preserved (along with all of the existing flush and write throttling
logic) and concurrent writers are blocked until all memtable insertions
have completed and the sequence number has been advanced, to preserve
linearizability.
My test config is "db_bench -benchmarks=fillrandom -threads=$T
-batch_size=1 -memtablerep=skip_list -value_size=100 --num=1000000/$T
-level0_slowdown_writes_trigger=9999 -level0_stop_writes_trigger=9999
-disable_auto_compactions --max_write_buffer_number=8
-max_background_flushes=8 --disable_wal --write_buffer_size=160000000
--block_size=16384 --allow_concurrent_memtable_write" on a two-socket
Xeon E5-2660 @ 2.2Ghz with lots of memory and an SSD hard drive. With 1
thread I get ~440Kops/sec. Peak performance for 1 socket (numactl
-N1) is slightly more than 1Mops/sec, at 16 threads. Peak performance
across both sockets happens at 30 threads, and is ~900Kops/sec, although
with fewer threads there is less performance loss when the system has
background work.
Test Plan:
1. concurrent stress tests for InlineSkipList and DynamicBloom
2. make clean; make check
3. make clean; DISABLE_JEMALLOC=1 make valgrind_check; valgrind db_bench
4. make clean; COMPILE_WITH_TSAN=1 make all check; db_bench
5. make clean; COMPILE_WITH_ASAN=1 make all check; db_bench
6. make clean; OPT=-DROCKSDB_LITE make check
7. verify no perf regressions when disabled
Reviewers: igor, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, IslamAbdelRahman, anthony, yhchiang, rven, sdong, guyg8, kradhakrishnan, dhruba
Differential Revision: https://reviews.facebook.net/D50589
2015-08-15 01:59:07 +02:00
|
|
|
: seed_(s),
|
|
|
|
quit_flag_(false),
|
|
|
|
state_(STARTING),
|
|
|
|
pending_writers_(0),
|
|
|
|
state_cv_(&mu_) {}
|
2015-11-24 22:01:09 +01:00
|
|
|
|
|
|
|
void Wait(ReaderState s) {
|
|
|
|
mu_.Lock();
|
|
|
|
while (state_ != s) {
|
|
|
|
state_cv_.Wait();
|
|
|
|
}
|
|
|
|
mu_.Unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
void Change(ReaderState s) {
|
|
|
|
mu_.Lock();
|
|
|
|
state_ = s;
|
|
|
|
state_cv_.Signal();
|
|
|
|
mu_.Unlock();
|
|
|
|
}
|
|
|
|
|
support for concurrent adds to memtable
Summary:
This diff adds support for concurrent adds to the skiplist memtable
implementations. Memory allocation is made thread-safe by the addition of
a spinlock, with small per-core buffers to avoid contention. Concurrent
memtable writes are made via an additional method and don't impose a
performance overhead on the non-concurrent case, so parallelism can be
selected on a per-batch basis.
Write thread synchronization is an increasing bottleneck for higher levels
of concurrency, so this diff adds --enable_write_thread_adaptive_yield
(default off). This feature causes threads joining a write batch
group to spin for a short time (default 100 usec) using sched_yield,
rather than going to sleep on a mutex. If the timing of the yield calls
indicates that another thread has actually run during the yield then
spinning is avoided. This option improves performance for concurrent
situations even without parallel adds, although it has the potential to
increase CPU usage (and the heuristic adaptation is not yet mature).
Parallel writes are not currently compatible with
inplace updates, update callbacks, or delete filtering.
Enable it with --allow_concurrent_memtable_write (and
--enable_write_thread_adaptive_yield). Parallel memtable writes
are performance neutral when there is no actual parallelism, and in
my experiments (SSD server-class Linux and varying contention and key
sizes for fillrandom) they are always a performance win when there is
more than one thread.
Statistics are updated earlier in the write path, dropping the number
of DB mutex acquisitions from 2 to 1 for almost all cases.
This diff was motivated and inspired by Yahoo's cLSM work. It is more
conservative than cLSM: RocksDB's write batch group leader role is
preserved (along with all of the existing flush and write throttling
logic) and concurrent writers are blocked until all memtable insertions
have completed and the sequence number has been advanced, to preserve
linearizability.
My test config is "db_bench -benchmarks=fillrandom -threads=$T
-batch_size=1 -memtablerep=skip_list -value_size=100 --num=1000000/$T
-level0_slowdown_writes_trigger=9999 -level0_stop_writes_trigger=9999
-disable_auto_compactions --max_write_buffer_number=8
-max_background_flushes=8 --disable_wal --write_buffer_size=160000000
--block_size=16384 --allow_concurrent_memtable_write" on a two-socket
Xeon E5-2660 @ 2.2Ghz with lots of memory and an SSD hard drive. With 1
thread I get ~440Kops/sec. Peak performance for 1 socket (numactl
-N1) is slightly more than 1Mops/sec, at 16 threads. Peak performance
across both sockets happens at 30 threads, and is ~900Kops/sec, although
with fewer threads there is less performance loss when the system has
background work.
Test Plan:
1. concurrent stress tests for InlineSkipList and DynamicBloom
2. make clean; make check
3. make clean; DISABLE_JEMALLOC=1 make valgrind_check; valgrind db_bench
4. make clean; COMPILE_WITH_TSAN=1 make all check; db_bench
5. make clean; COMPILE_WITH_ASAN=1 make all check; db_bench
6. make clean; OPT=-DROCKSDB_LITE make check
7. verify no perf regressions when disabled
Reviewers: igor, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, IslamAbdelRahman, anthony, yhchiang, rven, sdong, guyg8, kradhakrishnan, dhruba
Differential Revision: https://reviews.facebook.net/D50589
2015-08-15 01:59:07 +02:00
|
|
|
void AdjustPendingWriters(int delta) {
|
|
|
|
mu_.Lock();
|
|
|
|
pending_writers_ += delta;
|
|
|
|
if (pending_writers_ == 0) {
|
|
|
|
state_cv_.Signal();
|
|
|
|
}
|
|
|
|
mu_.Unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
void WaitForPendingWriters() {
|
|
|
|
mu_.Lock();
|
|
|
|
while (pending_writers_ != 0) {
|
|
|
|
state_cv_.Wait();
|
|
|
|
}
|
|
|
|
mu_.Unlock();
|
|
|
|
}
|
|
|
|
|
2015-11-24 22:01:09 +01:00
|
|
|
private:
|
|
|
|
port::Mutex mu_;
|
|
|
|
ReaderState state_;
|
  int pending_writers_;
  port::CondVar state_cv_;
};

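// Background task: marks itself RUNNING, performs ReadStep iterations until
// quit_flag_ is set, then marks itself DONE.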
static void ConcurrentReader(void* arg) {
  TestState* state = reinterpret_cast<TestState*>(arg);
  Random rnd(state->seed_);
  int64_t reads = 0;
  state->Change(TestState::RUNNING);
  while (!state->quit_flag_.load(std::memory_order_acquire)) {
    state->t_.ReadStep(&rnd);
    ++reads;
  }
  state->Change(TestState::DONE);
}

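// Background task: claims a writer slot from next_writer_, performs one
// concurrent write step, and decrements the pending-writer count.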
static void ConcurrentWriter(void* arg) {
  TestState* state = reinterpret_cast<TestState*>(arg);
  uint32_t k = state->next_writer_++ % ConcurrentTest::K;
  state->t_.ConcurrentWriteStep(k);
  state->AdjustPendingWriters(-1);
}

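// Single-writer scenario: one background reader validates the list while this
// thread performs all of the writes.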
static void RunConcurrentRead(int run) {
  const int seed = test::RandomSeed() + (run * 100);
  Random rnd(seed);
  const int N = 1000;
  const int kSize = 1000;
  for (int i = 0; i < N; i++) {
    if ((i % 100) == 0) {
      fprintf(stderr, "Run %d of %d\n", i, N);
    }
    TestState state(seed + 1);
    Env::Default()->Schedule(ConcurrentReader, &state);
    state.Wait(TestState::RUNNING);
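    // All writes happen on this thread; the reader runs concurrently.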
    for (int k = 0; k < kSize; ++k) {
      state.t_.WriteStep(&rnd);
    }
    state.quit_flag_.store(true, std::memory_order_release);
    state.Wait(TestState::DONE);
  }
}

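// Multi-writer scenario: a background reader validates the list while batches
// of write_parallelism writers insert concurrently via ConcurrentWriteStep.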
static void RunConcurrentInsert(int run, int write_parallelism = 4) {
  Env::Default()->SetBackgroundThreads(1 + write_parallelism,
                                       Env::Priority::LOW);
  const int seed = test::RandomSeed() + (run * 100);
  Random rnd(seed);
  const int N = 1000;
  const int kSize = 1000;
  for (int i = 0; i < N; i++) {
    if ((i % 100) == 0) {
      fprintf(stderr, "Run %d of %d\n", i, N);
    }
    TestState state(seed + 1);
    Env::Default()->Schedule(ConcurrentReader, &state);
    state.Wait(TestState::RUNNING);
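    // Issue writes in batches of write_parallelism concurrent tasks and wait
    // for each batch to drain before starting the next one.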
    for (int k = 0; k < kSize; k += write_parallelism) {
      state.next_writer_ = rnd.Next();
      state.AdjustPendingWriters(write_parallelism);
      for (int p = 0; p < write_parallelism; ++p) {
        Env::Default()->Schedule(ConcurrentWriter, &state);
      }
      state.WaitForPendingWriters();
    }
    state.quit_flag_.store(true, std::memory_order_release);
    state.Wait(TestState::DONE);
  }
}

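// Each numbered test repeats the scenario with a different run index, which
// offsets the random seed by run * 100.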
TEST_F(InlineSkipTest, ConcurrentRead1) { RunConcurrentRead(1); }
TEST_F(InlineSkipTest, ConcurrentRead2) { RunConcurrentRead(2); }
TEST_F(InlineSkipTest, ConcurrentRead3) { RunConcurrentRead(3); }
TEST_F(InlineSkipTest, ConcurrentRead4) { RunConcurrentRead(4); }
TEST_F(InlineSkipTest, ConcurrentRead5) { RunConcurrentRead(5); }
TEST_F(InlineSkipTest, ConcurrentInsert1) { RunConcurrentInsert(1); }
TEST_F(InlineSkipTest, ConcurrentInsert2) { RunConcurrentInsert(2); }
TEST_F(InlineSkipTest, ConcurrentInsert3) { RunConcurrentInsert(3); }

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}