// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include "db/write_thread.h"
#include "util/sync_point.h"

namespace rocksdb {
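
// Await: block the calling writer until another thread marks it joined via
// MarkJoined(), using the writer's own mutex and condition variable.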
void WriteThread::Await(Writer* w) {
  std::unique_lock<std::mutex> guard(w->JoinMutex());
  w->JoinCV().wait(guard, [w] { return w->joined; });
}
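
// MarkJoined: wake a writer blocked in Await() by setting w->joined under
// the writer's mutex and signaling its condition variable.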
void WriteThread::MarkJoined(Writer* w) {
  std::lock_guard<std::mutex> guard(w->JoinMutex());
  assert(!w->joined);
  w->joined = true;
  w->JoinCV().notify_one();
}
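
// LinkOne: atomically push w onto the newest_writer_ list. On return,
// *wait_needed is true iff the list was non-empty, i.e. w is queued behind
// another writer and must wait to be woken.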
void WriteThread::LinkOne(Writer* w, bool* wait_needed) {
  assert(!w->joined && !w->done);

  Writer* writers = newest_writer_.load(std::memory_order_relaxed);
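  // Retry the compare-and-swap until w is installed as the newest writer;
  // a failed CAS reloads the current head into writers.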
  while (true) {
    w->link_older = writers;
    if (writers != nullptr) {
      w->CreateMutex();
    }
    if (newest_writer_.compare_exchange_strong(writers, w)) {
      // Success.
      *wait_needed = (writers != nullptr);
      return;
    }
  }
}
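
// CreateMissingNewerLinks: walk backward from head along link_older and
// fill in the reverse link_newer pointers, stopping at the oldest writer
// or at the first node whose link_newer is already set.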
void WriteThread::CreateMissingNewerLinks(Writer* head) {
  while (true) {
    Writer* next = head->link_older;
    if (next == nullptr || next->link_newer != nullptr) {
      assert(next == nullptr || next->link_newer == head);
      break;
    }
    next->link_newer = head;
    head = next;
  }
}
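
// JoinBatchGroup: link w into the write queue. If another writer was
// already queued, block until a departing leader wakes this writer via
// MarkJoined; otherwise w is at the head of the queue and proceeds as the
// new leader.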
void WriteThread::JoinBatchGroup(Writer* w) {
  assert(w->batch != nullptr);
  bool wait_needed;
  LinkOne(w, &wait_needed);
  if (wait_needed) {
    Await(w);
  }
}
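
// EnterAsBatchGroupLeader: called by the queue leader to form a batch
// group. Collects the leader's batch plus the batches of compatible
// writers queued behind it into *write_batch_group, sets *last_writer to
// the last writer included, and returns the group's total byte size.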
size_t WriteThread::EnterAsBatchGroupLeader(
    Writer* leader, WriteThread::Writer** last_writer,
    autovector<WriteBatch*>* write_batch_group) {
  assert(leader->link_older == nullptr);
  assert(leader->batch != nullptr);

  size_t size = WriteBatchInternal::ByteSize(leader->batch);
  write_batch_group->push_back(leader->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128 << 10)) {
    max_size = size + (128 << 10);
  }

  *last_writer = leader;

  if (leader->has_callback) {
    // TODO(agiardullo): Batching not currently supported as this write may
    // fail if the callback function decides to abort this write.
    return size;
  }

  Writer* newest_writer = newest_writer_.load(std::memory_order_acquire);

  // This is safe regardless of the caller's db mutex status. Previous calls
  // to ExitAsBatchGroupLeader either didn't call CreateMissingNewerLinks
  // (they emptied the list and then we added ourselves as leader) or had to
  // explicitly wake us up (the list was non-empty when we added ourselves,
  // so we have already received our MarkJoined).
  CreateMissingNewerLinks(newest_writer);
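
  // Scan the writers queued behind the leader, adding each compatible
  // batch to the group until an incompatibility or the size limit ends
  // the scan.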
  // Tricky. Iteration start (leader) is exclusive and finish
  // (newest_writer) is inclusive. Iteration goes from old to new.
  Writer* w = leader;
  while (w != newest_writer) {
    w = w->link_newer;

    if (w->sync && !leader->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (!w->disableWAL && leader->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

    if (w->has_callback) {
      // Do not include writes which may be aborted if the callback does not
      // succeed.
      break;
    }

    if (w->batch == nullptr) {
      // Do not include writes with a nullptr batch. Those are not writes;
      // they are something else and want to be handled alone.
      break;
    }

    auto batch_size = WriteBatchInternal::ByteSize(w->batch);
    if (size + batch_size > max_size) {
      // Do not make the batch too big.
      break;
    }

    size += batch_size;
    write_batch_group->push_back(w->batch);
    w->in_batch_group = true;
    *last_writer = w;
  }
  return size;
}
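
// ExitAsBatchGroupLeader: called by the leader after the group's write.
// Unlinks the group [leader, last_writer] from the queue, hands leadership
// to the next queued writer (if any), and wakes every follower in the
// group with the given status.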
void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,
                                         Status status) {
  assert(leader->link_older == nullptr);

  Writer* head = newest_writer_.load(std::memory_order_acquire);
  if (head != last_writer ||
      !newest_writer_.compare_exchange_strong(head, nullptr)) {
    // Either last_writer wasn't the head during the load(), or it was the
    // head during the load() but somebody else pushed onto the list before
    // we did the compare_exchange_strong (causing it to fail). In the
    // latter case compare_exchange_strong has the effect of re-reading
    // its first param (head). There is no need to retry a failing CAS,
    // because only a departing leader (which we are at the moment) can
    // remove nodes from the list.
    assert(head != last_writer);

    // After walking link_older starting from head (if not already done),
    // we will be able to traverse last_writer->link_newer below. This
    // function can only be called by an active leader, only a leader can
    // clear newest_writer_, we didn't, and only a cleared newest_writer_
    // could cause the next leader to start their work without a call
    // to MarkJoined, so we can definitely conclude that no other leader
    // work is going on here (with or without db mutex).
    CreateMissingNewerLinks(head);
    assert(last_writer->link_newer->link_older == last_writer);
    last_writer->link_newer->link_older = nullptr;

    // The next leader didn't self-identify, because newest_writer_ wasn't
    // nullptr when they enqueued (we were definitely enqueued before them
    // and are still in the list). That means leader handoff occurs when
    // we call MarkJoined.
    MarkJoined(last_writer->link_newer);
  }
  // else nobody else was waiting, although there might already be a new
  // leader now
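
  // Hand the group's status to every follower between last_writer and the
  // leader (exclusive of the leader), marking each done and waking it via
  // MarkJoined.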
  while (last_writer != leader) {
    last_writer->status = status;
    last_writer->done = true;
    // We must read link_older before calling MarkJoined, because as soon
    // as it is marked the other thread's Await may return and deallocate
    // the Writer.
    auto next = last_writer->link_older;
    MarkJoined(last_writer);
    last_writer = next;
  }
}
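
// EnterUnbatched: link a writer with no batch into the queue so it runs
// alone. If other writers are already queued, release the db mutex while
// waiting to be woken, then reacquire it.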
void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
  assert(w->batch == nullptr);
  bool wait_needed;
  LinkOne(w, &wait_needed);
  if (wait_needed) {
    mu->Unlock();
    TEST_SYNC_POINT("WriteThread::EnterUnbatched:Wait");
    Await(w);
    mu->Lock();
  }
}
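
// ExitUnbatched: release the queue position taken by EnterUnbatched,
// waking the next queued writer if one is waiting.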
void WriteThread::ExitUnbatched(Writer* w) {
  Status dummy_status;
  ExitAsBatchGroupLeader(w, w, dummy_status);
}

} // namespace rocksdb