rocksdb/test_util/sync_point_impl.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "test_util/sync_point_impl.h"
#ifndef NDEBUG
namespace ROCKSDB_NAMESPACE {
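
// Crash the process with probability 1/odds when execution reaches
// kill_point, unless the point matches one of the prefixes in
// rocksdb_kill_prefix_blacklist.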
void TestKillRandom(std::string kill_point, int odds,
                    const std::string& srcfile, int srcline) {
  for (auto& p : rocksdb_kill_prefix_blacklist) {
    if (kill_point.substr(0, p.length()) == p) {
      return;
    }
  }

  assert(odds > 0);
  if (odds % 7 == 0) {
    // class Random uses multiplier 16807, which is 7^5. If odds is a
    // multiple of 7, only a limited set of values might be generated.
    odds++;
  }
  auto* r = Random::GetTLSInstance();
  bool crash = r->OneIn(odds);
  if (crash) {
    port::Crash(srcfile, srcline);
  }
}
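
// Replace the current set of happens-before dependencies between sync
// points and wake up any threads waiting under the old configuration.
// A rough usage sketch from test code (the point names below are made up
// for illustration):
//
//   SyncPoint::GetInstance()->LoadDependency(
//       {{"Thread1:Reached", "Thread2:MayProceed"}});
//   SyncPoint::GetInstance()->EnableProcessing();
//   ...
//   TEST_SYNC_POINT("Thread1:Reached");     // clears the predecessor
//   TEST_SYNC_POINT("Thread2:MayProceed");  // blocks until it is cleared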
void SyncPoint::Data::LoadDependency(
    const std::vector<SyncPointPair>& dependencies) {
  std::lock_guard<std::mutex> lock(mutex_);
  successors_.clear();
  predecessors_.clear();
  cleared_points_.clear();
  for (const auto& dependency : dependencies) {
    successors_[dependency.predecessor].push_back(dependency.successor);
    predecessors_[dependency.successor].push_back(dependency.predecessor);
  }
  cv_.notify_all();
}
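
// Like LoadDependency(), but also installs marker pairs. Each marker is
// recorded as a regular dependency as well; Process() additionally uses it
// to associate the successor point with the thread that passed through the
// predecessor point, so the successor can be skipped on other threads.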
void SyncPoint::Data::LoadDependencyAndMarkers(
    const std::vector<SyncPointPair>& dependencies,
    const std::vector<SyncPointPair>& markers) {
  std::lock_guard<std::mutex> lock(mutex_);
  successors_.clear();
  predecessors_.clear();
  cleared_points_.clear();
  markers_.clear();
  marked_thread_id_.clear();
  for (const auto& dependency : dependencies) {
    successors_[dependency.predecessor].push_back(dependency.successor);
    predecessors_[dependency.successor].push_back(dependency.predecessor);
  }
  for (const auto& marker : markers) {
    successors_[marker.predecessor].push_back(marker.successor);
    predecessors_[marker.successor].push_back(marker.predecessor);
    markers_[marker.predecessor].push_back(marker.successor);
  }
  cv_.notify_all();
}
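
// Return true only if every predecessor of `point` has already been
// cleared. Expects mutex_ to be held by the caller.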
bool SyncPoint::Data::PredecessorsAllCleared(const std::string& point) {
  for (const auto& pred : predecessors_[point]) {
    if (cleared_points_.count(pred) == 0) {
      return false;
    }
  }
  return true;
}
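
// Remove the callback registered for `point`, first waiting for any
// callback invocations still in flight to finish.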
void SyncPoint::Data::ClearCallBack(const std::string& point) {
  std::unique_lock<std::mutex> lock(mutex_);
  while (num_callbacks_running_ > 0) {
    cv_.wait(lock);
  }
  callbacks_.erase(point);
}
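
// Remove all registered callbacks, first waiting for any callback
// invocations still in flight to finish.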
void SyncPoint::Data::ClearAllCallBacks() {
  std::unique_lock<std::mutex> lock(mutex_);
  while (num_callbacks_running_ > 0) {
    cv_.wait(lock);
  }
  callbacks_.clear();
}
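
// Called when execution reaches `point` (typically via TEST_SYNC_POINT).
// Records marker information, blocks until every predecessor of the point
// has been cleared (unless a marker disables the point for this thread),
// runs any registered callback with mutex_ released, then marks the point
// cleared and wakes up waiting threads.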
void SyncPoint::Data::Process(const std::string& point, void* cb_arg) {
  if (!enabled_) {
    return;
  }

  std::unique_lock<std::mutex> lock(mutex_);
  auto thread_id = std::this_thread::get_id();

  auto marker_iter = markers_.find(point);
  if (marker_iter != markers_.end()) {
    for (auto& marked_point : marker_iter->second) {
      marked_thread_id_.emplace(marked_point, thread_id);
    }
  }

  if (DisabledByMarker(point, thread_id)) {
    return;
  }

  while (!PredecessorsAllCleared(point)) {
    cv_.wait(lock);
    if (DisabledByMarker(point, thread_id)) {
      return;
    }
  }

  auto callback_pair = callbacks_.find(point);
  if (callback_pair != callbacks_.end()) {
    num_callbacks_running_++;
    mutex_.unlock();
    callback_pair->second(cb_arg);
    mutex_.lock();
    num_callbacks_running_--;
  }
  cleared_points_.insert(point);
  cv_.notify_all();
}
} // namespace ROCKSDB_NAMESPACE
#endif