//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2022
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include "td/actor/ConcurrentScheduler.h"

#include "td/utils/ExitGuard.h"
#include "td/utils/MpscPollableQueue.h"
#include "td/utils/port/thread_local.h"
#include "td/utils/ScopeGuard.h"

#include <memory>

namespace td {
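
// Sets up one scheduler for the caller's thread plus one scheduler per additional thread,
// each with its own MpscPollableQueue for inbound events.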
ConcurrentScheduler::ConcurrentScheduler(int32 additional_thread_count, uint64 thread_affinity_mask) {
#if TD_THREAD_UNSUPPORTED || TD_EVENTFD_UNSUPPORTED
  additional_thread_count = 0;
#endif
  additional_thread_count++;
  std::vector<std::shared_ptr<MpscPollableQueue<EventFull>>> outbound(additional_thread_count);
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
  for (int32 i = 0; i < additional_thread_count; i++) {
    auto queue = std::make_shared<MpscPollableQueue<EventFull>>();
    queue->init();
    outbound[i] = queue;
  }
  thread_affinity_mask_ = thread_affinity_mask;
#endif

  // +1 for an extra scheduler that serves IOCP and send_closure calls from unrelated threads.
  // It knows about the other schedulers, but they have no idea about its existence.
  extra_scheduler_ = 1;
#if TD_THREAD_UNSUPPORTED || TD_EVENTFD_UNSUPPORTED
  extra_scheduler_ = 0;
#endif

  schedulers_.resize(additional_thread_count + extra_scheduler_);
  for (int32 i = 0; i < additional_thread_count + extra_scheduler_; i++) {
    auto &sched = schedulers_[i];
    sched = make_unique<Scheduler>();

#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
    if (i >= additional_thread_count) {
      auto queue = std::make_shared<MpscPollableQueue<EventFull>>();
      queue->init();
      outbound.push_back(std::move(queue));
    }
#endif

    sched->init(i, outbound, static_cast<Scheduler::Callback *>(this));
  }

#if TD_PORT_WINDOWS
  iocp_ = make_unique<detail::Iocp>();
  iocp_->init();
#endif

  state_ = State::Start;
}
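
// Runs every scheduler in a round-robin loop on the calling thread until the group is
// finished (see on_finish()).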
void ConcurrentScheduler::test_one_thread_run() {
  do {
    for (auto &sched : schedulers_) {
      sched->run(Timestamp::now_cached());
    }
  } while (!is_finished_.load(std::memory_order_relaxed));
}
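
// Starts one worker thread per additional scheduler; each worker optionally pins itself to
// the configured affinity mask and then polls its scheduler until the group is finished.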
void ConcurrentScheduler::start() {
  CHECK(state_ == State::Start);
  is_finished_.store(false, std::memory_order_relaxed);
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
  for (size_t i = 1; i + extra_scheduler_ < schedulers_.size(); i++) {
    auto &sched = schedulers_[i];
    threads_.push_back(td::thread([&, thread_affinity_mask = thread_affinity_mask_] {
#if TD_PORT_WINDOWS
      detail::Iocp::Guard iocp_guard(iocp_.get());
#endif
#if TD_HAVE_THREAD_AFFINITY
      if (thread_affinity_mask != 0) {
        thread::set_affinity_mask(this_thread::get_id(), thread_affinity_mask).ignore();
      }
#endif
      while (!is_finished()) {
        sched->run(Timestamp::in(10));
      }
    }));
  }
#if TD_PORT_WINDOWS
  iocp_thread_ = td::thread([this] {
    auto guard = this->get_send_guard();
    this->iocp_->loop();
  });
#endif
#endif

  state_ = State::Run;
}
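
// Thread-local storage for the next main-scheduler wakeup time, exported to the Emscripten
// main loop via emscripten_get_main_timeout().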
static TD_THREAD_LOCAL double emscripten_timeout;
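
// Runs the main scheduler on the calling thread up to the given timeout; returns true
// while the scheduler group is still running.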
bool ConcurrentScheduler::run_main(Timestamp timeout) {
  CHECK(state_ == State::Run);
  // run the main scheduler in the same thread
  auto &main_sched = schedulers_[0];
  if (!is_finished()) {
#if TD_PORT_WINDOWS
    detail::Iocp::Guard iocp_guard(iocp_.get());
#endif
    main_sched->run(timeout);
  }

  // hack for Emscripten: remember the next wakeup time in a thread-local variable
  emscripten_timeout = get_main_timeout().at();

  return !is_finished();
}

Timestamp ConcurrentScheduler::get_main_timeout() {
  CHECK(state_ == State::Run);
  return schedulers_[0]->get_timeout();
}

double ConcurrentScheduler::emscripten_get_main_timeout() {
  return Timestamp::at(emscripten_timeout).in();
}

void ConcurrentScheduler::emscripten_clear_main_timeout() {
  emscripten_timeout = 0;
}
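
// Stops the scheduler group: wakes all schedulers, joins (or, after ExitGuard has fired,
// detaches) the worker threads, and runs the registered at-finish callbacks.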
void ConcurrentScheduler::finish() {
  CHECK(state_ == State::Run);
  if (!is_finished()) {
    on_finish();
  }
#if TD_PORT_WINDOWS
  SCOPE_EXIT {
    iocp_->clear();
  };
  detail::Iocp::Guard iocp_guard(iocp_.get());
#endif

  if (ExitGuard::is_exited()) {
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
    // prevent closing of schedulers from threads that were already killed by the OS
    for (auto &thread : threads_) {
      thread.detach();
    }
#endif

#if TD_PORT_WINDOWS
    iocp_->interrupt_loop();
    iocp_thread_.detach();
#endif
    return;
  }

#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
  for (auto &thread : threads_) {
    thread.join();
  }
  threads_.clear();
#endif

#if TD_PORT_WINDOWS
  iocp_->interrupt_loop();
  iocp_thread_.join();
#endif

  schedulers_.clear();
  for (auto &f : at_finish_) {
    f();
  }
  at_finish_.clear();

  state_ = State::Start;
}
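
// Marks the group as finished and wakes up every scheduler so its loop can observe the flag.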
void ConcurrentScheduler::on_finish() {
  is_finished_.store(true, std::memory_order_relaxed);
  for (auto &it : schedulers_) {
    it->wakeup();
  }
}
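
// Registers a callback to be invoked at the end of finish(); protected by a mutex, so it
// may be called from multiple threads.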
void ConcurrentScheduler::register_at_finish(std::function<void()> f) {
  std::lock_guard<std::mutex> lock(at_finish_mutex_);
  at_finish_.push_back(std::move(f));
}

}  // namespace td
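
// Typical driving loop (a minimal sketch, not part of this file; MyActor is a hypothetical
// td::Actor subclass and the parameter values are illustrative):
//
//   ConcurrentScheduler scheduler(/*additional_thread_count=*/3, /*thread_affinity_mask=*/0);
//   auto actor = scheduler.create_actor_unsafe<MyActor>(/*sched_id=*/0, "MyActor");
//   scheduler.start();
//   while (scheduler.run_main(Timestamp::in(10))) {
//     // process other work between scheduler iterations
//   }
//   scheduler.finish();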