Merge remote-tracking branch 'td/master'

Andrea Cavalli 2023-01-17 14:57:25 +01:00
commit 7e7c1c1bb4
90 changed files with 1061 additions and 888 deletions

View File

@ -433,6 +433,7 @@ set(TDLIB_SOURCE
td/telegram/Premium.cpp
td/telegram/PremiumGiftOption.cpp
td/telegram/QueryCombiner.cpp
td/telegram/QueryMerger.cpp
td/telegram/RecentDialogList.cpp
td/telegram/ReplyMarkup.cpp
td/telegram/ReportReason.cpp
@ -695,6 +696,7 @@ set(TDLIB_SOURCE
td/telegram/PtsManager.h
td/telegram/PublicDialogType.h
td/telegram/QueryCombiner.h
td/telegram/QueryMerger.h
td/telegram/RecentDialogList.h
td/telegram/ReplyMarkup.h
td/telegram/ReportReason.h

View File

@ -184,7 +184,7 @@ See also [telegram-flutter](https://github.com/ivk1800/telegram-flutter) - Teleg
TDLib can be used from the Rust programming language through the [JSON](https://github.com/tdlib/td#using-json) interface.
See [rust-tdlib](https://github.com/aCLr/rust-tdlib), or [tdlib](https://github.com/melix99/tdlib-rs), which provide convenient TDLib clients with automatically generated and fully-documented classes for all TDLib API methods and objects.
See [rust-tdlib](https://github.com/antonio-antuan/rust-tdlib), or [tdlib](https://github.com/melix99/tdlib-rs), which provide convenient TDLib clients with automatically generated and fully-documented classes for all TDLib API methods and objects.
See [rtdlib](https://github.com/fewensa/rtdlib), [tdlib-rs](https://github.com/d653/tdlib-rs), [tdlib-futures](https://github.com/yuri91/tdlib-futures),
[tdlib-sys](https://github.com/nuxeh/tdlib-sys), [tdjson-rs](https://github.com/mersinvald/tdjson-rs), [rust-tdlib](https://github.com/vhaoran/rust-tdlib), or [tdlib-json-sys](https://github.com/aykxt/tdlib-json-sys) for examples of TDLib Rust bindings.

View File

@ -75,8 +75,8 @@ def td_receive():
# another test for TDLib execute method
print(str(td_execute({'@type': 'getTextEntities', 'text': '@telegram /test_command https://telegram.org telegram.me', '@extra': ['5', 7.0, 'a']})).encode('utf-8'))
# start the client by sending request to it
td_send({'@type': 'getAuthorizationState', '@extra': 1.01234})
# start the client by sending a request to it
td_send({'@type': 'getOption', 'name': 'version', '@extra': 1.01234})
# main events cycle
while True:

View File

@ -85,11 +85,6 @@ namespace TdApp
AcceptCommand("LogOut");
_client.Send(new TdApi.LogOut(), _handler);
}
else if (command.StartsWith("gas"))
{
AcceptCommand(command);
_client.Send(new TdApi.GetAuthorizationState(), _handler);
}
else if (command.StartsWith("sap"))
{
var args = command.Split(" ".ToCharArray(), 2);

View File

@ -231,7 +231,7 @@ EOT
* A function to create a dynamically allocated TDLib API object. Can be treated as an analogue of std::make_unique.
* Usage example:
* \\code
* auto get_authorization_state_request = td::td_api::make_object<td::td_api::getAuthorizationState>();
* auto get_me_request = td::td_api::make_object<td::td_api::getMe>();
* auto message_text = td::td_api::make_object<td::td_api::formattedText>("Hello, world!!!",
* td::td_api::array<td::td_api::object_ptr<td::td_api::textEntity>>());
* auto send_message_request = td::td_api::make_object<td::td_api::sendMessage>(chat_id, 0, 0, nullptr, nullptr,

View File

@ -576,7 +576,7 @@ inputChatPhotoAnimation animation:InputFile main_frame_timestamp:double = InputC
//@description Describes actions that a user is allowed to take in a chat
//@can_send_messages True, if the user can send text messages, contacts, locations, and venues
//@can_send_messages True, if the user can send text messages, contacts, invoices, locations, and venues
//@can_send_media_messages True, if the user can send audio files, documents, photos, videos, video notes, and voice notes. Implies can_send_messages permissions
//@can_send_polls True, if the user can send polls. Implies can_send_messages permissions
//@can_send_other_messages True, if the user can send animations, games, stickers, and dice and use inline bots. Implies can_send_messages permissions
@ -1447,7 +1447,7 @@ replyMarkupInlineKeyboard rows:vector<vector<inlineKeyboardButton>> = ReplyMarku
//@class LoginUrlInfo @description Contains information about an inline button of type inlineKeyboardButtonTypeLoginUrl
//@description An HTTP url needs to be open @url The URL to open @skip_confirm True, if there is no need to show an ordinary open URL confirm
//@description An HTTP URL needs to be open @url The URL to open @skip_confirm True, if there is no need to show an ordinary open URL confirm
loginUrlInfoOpen url:string skip_confirm:Bool = LoginUrlInfo;
//@description An authorization confirmation dialog needs to be shown to the user
@ -2992,7 +2992,7 @@ groupCallStream channel_id:int32 scale:int32 time_offset:int53 = GroupCallStream
//@description Represents a list of group call streams @streams A list of group call streams
groupCallStreams streams:vector<groupCallStream> = GroupCallStreams;
//@description Represents an RTMP url @url The URL @stream_key Stream key
//@description Represents an RTMP URL @url The URL @stream_key Stream key
rtmpUrl url:string stream_key:string = RtmpUrl;

View File

@ -1019,7 +1019,6 @@ class TlWriterCCommon final : public tl::TL_writer {
if (is_proxy || is_header_ != 1) {
return "";
}
// return "#define CODE_" + class_name + " " + int_to_string(id) + "\n";
return "";
}

View File

@ -77,64 +77,6 @@ void IntermediateTransport::init_output_stream(ChainBufferWriter *stream) {
stream->append(Slice(reinterpret_cast<const char *>(&magic), 4));
}
size_t AbridgedTransport::read_from_stream(ChainBufferReader *stream, BufferSlice *message, uint32 *quick_ack) {
if (stream->empty()) {
return 1;
}
uint8 byte = 0;
stream->clone().advance(1, MutableSlice(&byte, 1));
size_t header_size;
uint32 data_size;
if (byte < 0x7f) {
header_size = 1;
data_size = byte * 4u;
} else {
if (stream->size() < 4) {
return 4;
}
header_size = 4;
stream->clone().advance(4, MutableSlice(reinterpret_cast<char *>(&data_size), sizeof(data_size)));
data_size >>= 8;
data_size = data_size * 4;
}
size_t total_size = header_size + data_size;
if (stream->size() < total_size) {
// optimization
// stream->make_solid(total_size);
return total_size;
}
stream->advance(header_size);
*message = stream->cut_head(data_size).move_as_buffer_slice();
return 0;
}
void AbridgedTransport::write_prepare_inplace(BufferWriter *message, bool quick_ack) {
CHECK(!quick_ack);
size_t size = message->size() / 4;
CHECK(size % 4 == 0);
CHECK(size < 1 << 24);
size_t prepend_size = size >= 0x7f ? 4 : 1;
MutableSlice prepend = message->prepare_prepend();
CHECK(prepend.size() >= prepend_size);
message->confirm_prepend(prepend_size);
MutableSlice data = message->as_slice();
if (size >= 0x7f) {
uint32 size_encoded = 0x7f + (static_cast<uint32>(size) << 8);
as<uint32>(data.begin()) = size_encoded;
} else {
as<uint8>(data.begin()) = static_cast<uint8>(size);
}
}
void AbridgedTransport::init_output_stream(ChainBufferWriter *stream) {
stream->append("\xef");
}
void ObfuscatedTransport::init(ChainBufferReader *input, ChainBufferWriter *output) {
input_ = input;
output_ = output;
@ -164,8 +106,6 @@ void ObfuscatedTransport::init(ChainBufferReader *input, ChainBufferWriter *outp
}
break;
}
// TODO: It is actually IntermediateTransport::init_output_stream, so it will work only with
// TransportImpl==IntermediateTransport
as<uint32>(header_slice.begin() + 56) = impl_.with_padding() ? 0xdddddddd : 0xeeeeeeee;
if (dc_id_ != 0) {
as<int16>(header_slice.begin() + 60) = dc_id_;

View File

@ -24,51 +24,24 @@ namespace td {
namespace mtproto {
namespace tcp {
class ITransport {
// Writes packet into message.
// Returns 0 if everything is ok, and [expected_size] otherwise.
// There is no sense to call this function when [stream->size > expected_size]
//
// (tpc is stream-base protocol. So the input message is a stream, not a slice)
virtual size_t read_from_stream(ChainBufferReader *stream, BufferSlice *message, uint32 *quick_ack) = 0;
// Writes header inplace.
virtual void write_prepare_inplace(BufferWriter *message, bool quick_ack) = 0;
// Writes first several bytes into output stream.
virtual void init_output_stream(ChainBufferWriter *stream) = 0;
virtual bool support_quick_ack() const = 0;
public:
ITransport() = default;
ITransport(const ITransport &) = delete;
ITransport &operator=(const ITransport &) = delete;
ITransport(ITransport &&) = delete;
ITransport &operator=(ITransport &&) = delete;
virtual ~ITransport() = default;
};
class AbridgedTransport final : public ITransport {
public:
size_t read_from_stream(ChainBufferReader *stream, BufferSlice *message, uint32 *quick_ack) final;
void write_prepare_inplace(BufferWriter *message, bool quick_ack) final;
void init_output_stream(ChainBufferWriter *stream) final;
bool support_quick_ack() const final {
return false;
}
};
class IntermediateTransport final : public ITransport {
class IntermediateTransport {
public:
explicit IntermediateTransport(bool with_padding) : with_padding_(with_padding) {
}
size_t read_from_stream(ChainBufferReader *stream, BufferSlice *message, uint32 *quick_ack) final;
void write_prepare_inplace(BufferWriter *message, bool quick_ack) final;
void init_output_stream(ChainBufferWriter *stream) final;
bool support_quick_ack() const final {
return true;
}
// Writes a packet into message.
// Returns 0 if everything is ok, and [expected_size] otherwise.
// There is no sense to call this function when [stream->size > expected_size]
//
// (TCP is a stream-oriented protocol, so the input message is a stream, not a slice)
size_t read_from_stream(ChainBufferReader *stream, BufferSlice *message, uint32 *quick_ack);
// Writes header inplace.
void write_prepare_inplace(BufferWriter *message, bool quick_ack);
// Writes first several bytes into output stream.
void init_output_stream(ChainBufferWriter *stream);
bool with_padding() const {
return with_padding_;
}
@ -77,8 +50,6 @@ class IntermediateTransport final : public ITransport {
bool with_padding_;
};
using TransportImpl = IntermediateTransport;
class OldTransport final : public IStreamTransport {
public:
OldTransport() = default;
@ -86,7 +57,7 @@ class OldTransport final : public IStreamTransport {
return impl_.read_from_stream(input_, message, quick_ack);
}
bool support_quick_ack() const final {
return impl_.support_quick_ack();
return true;
}
void write(BufferWriter &&message, bool quick_ack) final {
impl_.write_prepare_inplace(&message, quick_ack);
@ -121,7 +92,7 @@ class OldTransport final : public IStreamTransport {
}
private:
TransportImpl impl_{false};
IntermediateTransport impl_{false};
ChainBufferReader *input_{nullptr};
ChainBufferWriter *output_{nullptr};
};
@ -135,7 +106,7 @@ class ObfuscatedTransport final : public IStreamTransport {
Result<size_t> read_next(BufferSlice *message, uint32 *quick_ack) final TD_WARN_UNUSED_RESULT;
bool support_quick_ack() const final {
return impl_.support_quick_ack();
return true;
}
void write(BufferWriter &&message, bool quick_ack) final;
@ -182,7 +153,7 @@ class ObfuscatedTransport final : public IStreamTransport {
bool is_first_tls_packet_{true};
ProxySecret secret_;
std::string header_;
TransportImpl impl_;
IntermediateTransport impl_;
TlsReaderByteFlow tls_reader_byte_flow_;
AesCtrByteFlow aes_ctr_byte_flow_;
ByteFlowSink byte_flow_sink_;
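
For context, a minimal caller-side sketch of the read_from_stream() contract documented above (an illustration under assumptions, not code from this commit): a return value of 0 means a complete packet was cut from the stream into *message, while a non-zero return value is the total number of bytes the transport expects before another call is useful.

// Hypothetical caller; only the public IntermediateTransport API from this header is assumed,
// plus td::BufferSlice, td::ChainBufferReader, and td::uint32 from td/utils/buffer.h and td/utils/common.h.
void drain_packets(td::mtproto::tcp::IntermediateTransport &transport, td::ChainBufferReader *input) {
  while (true) {
    td::BufferSlice packet;
    td::uint32 quick_ack = 0;
    size_t expected_size = transport.read_from_stream(input, &packet, &quick_ack);
    if (expected_size != 0) {
      // Not enough data buffered yet; retry once input->size() >= expected_size.
      break;
    }
    // handle_packet(std::move(packet));  // hypothetical downstream handler
  }
}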

View File

@ -162,7 +162,7 @@ class BackgroundManager final : public Actor {
FlatHashMap<BackgroundId, unique_ptr<Background>, BackgroundIdHash> backgrounds_;
FlatHashMap<BackgroundId, std::pair<int64, FileSourceId>, BackgroundIdHash>
background_id_to_file_source_id_; // id -> [access_hash, file_source_id]
background_id_to_file_source_id_; // background_id -> [access_hash, file_source_id]
FlatHashMap<string, BackgroundId> name_to_background_id_;

View File

@ -199,7 +199,7 @@ class ClientManager final {
* if (response.id == 0) {
* // process response.object as an incoming update of type td_api::Update
* } else {
* // process response.object as an answer to a sent request with id response.id
* // process response.object as an answer to a sent request with identifier response.id
* }
* }
* \endcode

View File

@ -521,8 +521,8 @@ static ActorOwn<> get_full_config(DcOption option, Promise<tl_object_ptr<telegra
int_dc_id += 10000;
}
session_ = create_actor<Session>("ConfigSession", std::move(session_callback), std::move(auth_data), raw_dc_id,
int_dc_id, false /*is_main*/, true /*use_pfs*/, false /*is_cdn*/,
false /*need_destroy_auth_key*/, mtproto::AuthKey(),
int_dc_id, false /*is_primary*/, false /*is_main*/, true /*use_pfs*/,
false /*is_cdn*/, false /*need_destroy_auth_key*/, mtproto::AuthKey(),
std::vector<mtproto::ServerSalt>());
auto query = G()->net_query_creator().create_unauth(telegram_api::help_getConfig(), DcId::empty());
query->total_timeout_limit_ = 60 * 60 * 24;
@ -823,9 +823,9 @@ class ConfigRecoverer final : public Actor {
case 2:
return get_simple_config_firebase_remote_config;
case 4:
return get_simple_config_firebase_realtime;
case 9:
return get_simple_config_firebase_firestore;
case 9:
return get_simple_config_firebase_realtime;
case 0:
case 3:
case 8:

View File

@ -3916,6 +3916,24 @@ ContactsManager::ContactsManager(Td *td, ActorShared<> parent) : td_(td), parent
channel_participant_cache_timeout_.set_callback(on_channel_participant_cache_timeout_callback);
channel_participant_cache_timeout_.set_callback_data(static_cast<void *>(this));
get_user_queries_.set_merge_function([this](vector<int64> query_ids, Promise<Unit> &&promise) {
auto input_users = transform(query_ids, [this](int64 query_id) {
auto r_input_user = get_input_user(UserId(query_id));
CHECK(r_input_user.is_ok());
return r_input_user.move_as_ok();
});
td_->create_handler<GetUsersQuery>(std::move(promise))->send(std::move(input_users));
});
get_chat_queries_.set_merge_function([this](vector<int64> query_ids, Promise<Unit> &&promise) {
td_->create_handler<GetChatsQuery>(std::move(promise))->send(std::move(query_ids));
});
get_channel_queries_.set_merge_function([this](vector<int64> query_ids, Promise<Unit> &&promise) {
CHECK(query_ids.size() == 1);
auto input_channel = get_input_channel(ChannelId(query_ids[0]));
CHECK(input_channel != nullptr);
td_->create_handler<GetChannelsQuery>(std::move(promise))->send(std::move(input_channel));
});
}
ContactsManager::~ContactsManager() {
@ -6040,7 +6058,8 @@ int64 ContactsManager::get_contacts_hash() {
}
void ContactsManager::reload_contacts(bool force) {
if (!td_->auth_manager_->is_bot() && next_contacts_sync_date_ != std::numeric_limits<int32>::max() &&
if (!G()->close_flag() && !td_->auth_manager_->is_bot() &&
next_contacts_sync_date_ != std::numeric_limits<int32>::max() &&
(next_contacts_sync_date_ < G()->unix_time() || force)) {
next_contacts_sync_date_ = std::numeric_limits<int32>::max();
td_->create_handler<GetContactsQuery>()->send(get_contacts_hash());
@ -9534,6 +9553,9 @@ void ContactsManager::on_load_contacts_from_database(string value) {
[actor_id = actor_id(this), expected_contact_count = user_ids.size()](Result<Unit> result) {
if (result.is_ok()) {
send_closure(actor_id, &ContactsManager::on_get_contacts_finished, expected_contact_count);
} else {
LOG(INFO) << "Failed to load contact users from database: " << result.error();
send_closure(actor_id, &ContactsManager::reload_contacts, true);
}
}));
@ -15804,7 +15826,6 @@ bool ContactsManager::get_user(UserId user_id, int left_tries, Promise<Unit> &&p
}
if (td_->auth_manager_->is_bot() ? !have_user(user_id) : !have_min_user(user_id)) {
// TODO UserLoader
if (left_tries > 2 && G()->parameters().use_chat_info_db) {
send_closure_later(actor_id(this), &ContactsManager::load_user_from_database, nullptr, user_id,
std::move(promise));
@ -15820,9 +15841,7 @@ bool ContactsManager::get_user(UserId user_id, int left_tries, Promise<Unit> &&p
return false;
}
vector<tl_object_ptr<telegram_api::InputUser>> users;
users.push_back(r_input_user.move_as_ok());
td_->create_handler<GetUsersQuery>(std::move(promise))->send(std::move(users));
get_user_queries_.add_query(user_id.get(), std::move(promise));
return false;
}
@ -15867,10 +15886,7 @@ void ContactsManager::reload_user(UserId user_id, Promise<Unit> &&promise) {
return promise.set_error(r_input_user.move_as_error());
}
// there is no much reason to combine different requests into one request
vector<tl_object_ptr<telegram_api::InputUser>> users;
users.push_back(r_input_user.move_as_ok());
td_->create_handler<GetUsersQuery>(std::move(promise))->send(std::move(users));
get_user_queries_.add_query(user_id.get(), std::move(promise));
}
void ContactsManager::load_user_full(UserId user_id, bool force, Promise<Unit> &&promise, const char *source) {
@ -16124,7 +16140,7 @@ bool ContactsManager::get_chat(ChatId chat_id, int left_tries, Promise<Unit> &&p
}
if (left_tries > 1) {
td_->create_handler<GetChatsQuery>(std::move(promise))->send(vector<int64>{chat_id.get()});
get_chat_queries_.add_query(chat_id.get(), std::move(promise));
return false;
}
@ -16141,8 +16157,7 @@ void ContactsManager::reload_chat(ChatId chat_id, Promise<Unit> &&promise) {
return promise.set_error(Status::Error(400, "Invalid basic group identifier"));
}
// there is no much reason to combine different requests into one request
td_->create_handler<GetChatsQuery>(std::move(promise))->send(vector<int64>{chat_id.get()});
get_chat_queries_.add_query(chat_id.get(), std::move(promise));
}
const ContactsManager::ChatFull *ContactsManager::get_chat_full(ChatId chat_id) const {
@ -16537,7 +16552,7 @@ bool ContactsManager::get_channel(ChannelId channel_id, int left_tries, Promise<
}
if (left_tries > 1 && td_->auth_manager_->is_bot()) {
td_->create_handler<GetChannelsQuery>(std::move(promise))->send(get_input_channel(channel_id));
get_channel_queries_.add_query(channel_id.get(), std::move(promise));
return false;
}
@ -16560,9 +16575,8 @@ void ContactsManager::reload_channel(ChannelId channel_id, Promise<Unit> &&promi
input_channel = make_tl_object<telegram_api::inputChannel>(channel_id.get(), 0);
}
// there is no much reason to combine different requests into one request
// requests with 0 access_hash must not be merged
td_->create_handler<GetChannelsQuery>(std::move(promise))->send(std::move(input_channel));
get_channel_queries_.add_query(channel_id.get(), std::move(promise));
}
const ContactsManager::ChannelFull *ContactsManager::get_channel_full_const(ChannelId channel_id) const {
@ -17442,7 +17456,7 @@ void ContactsManager::on_chat_update(telegram_api::chat &chat, const char *sourc
update_channel(c, migrated_to_channel_id);
// get info about the channel
td_->create_handler<GetChannelsQuery>(Promise<>())->send(std::move(input_channel));
get_channel_queries_.add_query(migrated_to_channel_id.get(), Promise<Unit>());
}
}
break;

View File

@ -32,6 +32,7 @@
#include "td/telegram/PremiumGiftOption.h"
#include "td/telegram/PublicDialogType.h"
#include "td/telegram/QueryCombiner.h"
#include "td/telegram/QueryMerger.h"
#include "td/telegram/RestrictionReason.h"
#include "td/telegram/SecretChatId.h"
#include "td/telegram/StickerSetId.h"
@ -726,7 +727,7 @@ class ContactsManager final : public Actor {
FlatHashSet<int64> photo_ids;
FlatHashMap<DialogId, int32, DialogIdHash> online_member_dialogs; // id -> time
FlatHashMap<DialogId, int32, DialogIdHash> online_member_dialogs; // dialog_id -> time
static constexpr uint32 CACHE_VERSION = 4;
uint32 cache_version = 0;
@ -1882,6 +1883,10 @@ class ContactsManager final : public Actor {
FlatHashMap<SecretChatId, vector<Promise<Unit>>, SecretChatIdHash> load_secret_chat_from_database_queries_;
FlatHashSet<SecretChatId, SecretChatIdHash> loaded_from_database_secret_chats_;
QueryMerger get_user_queries_{"GetUserMerger", 3, 50};
QueryMerger get_chat_queries_{"GetChatMerger", 3, 50};
QueryMerger get_channel_queries_{"GetChannelMerger", 100, 1}; // can't merge getChannel queries without access hash
QueryCombiner get_user_full_queries_{"GetUserFullCombiner", 2.0};
QueryCombiner get_chat_full_queries_{"GetChatFullCombiner", 2.0};

View File

@ -1683,7 +1683,7 @@ void InlineQueriesManager::on_get_inline_query_results(DialogId dialog_id, UserI
article->hide_url_ = true;
} else {
LOG_IF(ERROR, result->url_ != article->url_)
<< "Url has changed from " << article->url_ << " to " << result->url_;
<< "URL has changed from " << article->url_ << " to " << result->url_;
article->hide_url_ = false;
}
article->title_ = std::move(result->title_);

View File

@ -136,13 +136,13 @@ Result<InputInvoice> InputInvoice::process_input_message_invoice(
auto r_http_url = parse_url(input_invoice->photo_url_);
if (r_http_url.is_error()) {
if (!input_invoice->photo_url_.empty()) {
LOG(INFO) << "Can't register url " << input_invoice->photo_url_;
LOG(INFO) << "Can't register URL " << input_invoice->photo_url_;
}
} else {
auto url = r_http_url.ok().get_url();
auto r_invoice_file_id = td->file_manager_->from_persistent_id(url, FileType::Temp);
if (r_invoice_file_id.is_error()) {
LOG(INFO) << "Can't register url " << url;
LOG(INFO) << "Can't register URL " << url;
} else {
auto invoice_file_id = r_invoice_file_id.move_as_ok();

View File

@ -1688,7 +1688,7 @@ Result<LanguagePackManager::LanguageInfo> LanguagePackManager::get_language_info
return Status::Error(400, "Language pack plural code must be encoded in UTF-8");
}
if (!clean_input_string(language_pack_info->translation_url_)) {
return Status::Error(400, "Language pack translation url must be encoded in UTF-8");
return Status::Error(400, "Language pack translation URL must be encoded in UTF-8");
}
if (language_pack_info->total_string_count_ < 0) {
language_pack_info->total_string_count_ = 0;

View File

@ -565,7 +565,7 @@ class MessageDbImpl final : public MessageDbSyncInterface {
prev_found_message_id = message_id;
}
// left_message_id is always an id of suitable message, let's return it
// left_message_id is always an identifier of suitable message, let's return it
return get_message({dialog_id, MessageId(left_message_id)});
}
}

View File

@ -943,13 +943,11 @@ void send_set_default_reaction_query(Td *td) {
td->create_handler<SetDefaultReactionQuery>()->send(td->option_manager_->get_option_string("default_reaction"));
}
void send_update_default_reaction_type(const string &default_reaction) {
td_api::object_ptr<td_api::updateDefaultReactionType> get_update_default_reaction_type(const string &default_reaction) {
if (default_reaction.empty()) {
LOG(ERROR) << "Have no default reaction";
return;
return nullptr;
}
send_closure(G()->td(), &Td::send_update,
td_api::make_object<td_api::updateDefaultReactionType>(get_reaction_type_object(default_reaction)));
return td_api::make_object<td_api::updateDefaultReactionType>(get_reaction_type_object(default_reaction));
}
void report_message_reactions(Td *td, FullMessageId full_message_id, DialogId chooser_dialog_id,

View File

@ -224,7 +224,7 @@ void set_default_reaction(Td *td, string reaction, Promise<Unit> &&promise);
void send_set_default_reaction_query(Td *td);
void send_update_default_reaction_type(const string &default_reaction);
td_api::object_ptr<td_api::updateDefaultReactionType> get_update_default_reaction_type(const string &default_reaction);
void report_message_reactions(Td *td, FullMessageId full_message_id, DialogId chooser_dialog_id,
Promise<Unit> &&promise);

View File

@ -4722,7 +4722,7 @@ class GetChannelDifferenceQuery final : public Td::ResultHandler {
void on_error(Status status) final {
if (!td_->messages_manager_->on_get_dialog_error(dialog_id_, status, "GetChannelDifferenceQuery") &&
status.message() != "PERSISTENT_TIMESTAMP_INVALID") {
LOG(ERROR) << "Receive error for GetChannelDifferenceQuery for " << dialog_id_ << " with pts " << pts_
LOG(ERROR) << "Receive error for GetChannelDifferenceQuery for " << dialog_id_ << " with PTS " << pts_
<< " and limit " << limit_ << ": " << status;
}
td_->messages_manager_->on_get_channel_difference(dialog_id_, pts_, limit_, nullptr);
@ -6706,7 +6706,7 @@ void MessagesManager::skip_old_pending_pts_update(tl_object_ptr<telegram_api::Up
auto full_message_id = FullMessageId::get_full_message_id(update_new_message->message_, false);
if (update_message_ids_.count(full_message_id) > 0) {
if (new_pts == old_pts || old_pts == std::numeric_limits<int32>::max()) {
// apply sent message anyway if it is definitely non-deleted or being skipped because of pts overflow
// apply sent message anyway if it is definitely non-deleted or being skipped because of PTS overflow
auto added_full_message_id = on_get_message(std::move(update_new_message->message_), true, false, false, true,
true, "updateNewMessage with an awaited message");
if (added_full_message_id != full_message_id) {
@ -6714,8 +6714,8 @@ void MessagesManager::skip_old_pending_pts_update(tl_object_ptr<telegram_api::Up
}
return;
} else {
LOG(ERROR) << "Receive awaited sent " << full_message_id << " from " << source << " with pts " << new_pts
<< " and pts_count " << pts_count << ", but current pts is " << old_pts;
LOG(ERROR) << "Receive awaited sent " << full_message_id << " from " << source << " with PTS " << new_pts
<< " and pts_count " << pts_count << ", but current PTS is " << old_pts;
dump_debug_message_op(get_dialog(full_message_id.get_dialog_id()), 3);
}
}
@ -6724,14 +6724,14 @@ void MessagesManager::skip_old_pending_pts_update(tl_object_ptr<telegram_api::Up
auto update_sent_message = static_cast<updateSentMessage *>(update.get());
if (being_sent_messages_.count(update_sent_message->random_id_) > 0) {
if (new_pts == old_pts || old_pts == std::numeric_limits<int32>::max()) {
// apply sent message anyway if it is definitely non-deleted or being skipped because of pts overflow
// apply sent message anyway if it is definitely non-deleted or being skipped because of PTS overflow
on_send_message_success(update_sent_message->random_id_, update_sent_message->message_id_,
update_sent_message->date_, update_sent_message->ttl_period_, FileId(),
"process old updateSentMessage");
return;
} else if (update_sent_message->random_id_ != 0) {
LOG(ERROR) << "Receive awaited sent " << update_sent_message->message_id_ << " from " << source << " with pts "
<< new_pts << " and pts_count " << pts_count << ", but current pts is " << old_pts;
LOG(ERROR) << "Receive awaited sent " << update_sent_message->message_id_ << " from " << source << " with PTS "
<< new_pts << " and pts_count " << pts_count << ", but current PTS is " << old_pts;
dump_debug_message_op(get_dialog(being_sent_messages_[update_sent_message->random_id_].get_dialog_id()), 3);
}
}
@ -7949,7 +7949,7 @@ void MessagesManager::add_pending_channel_update(DialogId dialog_id, tl_object_p
return;
}
if (pts_count < 0 || new_pts <= pts_count) {
LOG(ERROR) << "Receive channel update from " << source << " with wrong pts = " << new_pts
LOG(ERROR) << "Receive channel update from " << source << " with wrong PTS = " << new_pts
<< " or pts_count = " << pts_count << ": " << oneline(to_string(update));
promise.set_value(Unit());
return;
@ -7962,7 +7962,7 @@ void MessagesManager::add_pending_channel_update(DialogId dialog_id, tl_object_p
return;
}
// TODO need to save all updates that can change result of running queries not associated with pts (for example
// TODO need to save all updates that can change result of running queries not associated with PTS (for example
// getHistory) and apply them to result of these queries
Dialog *d = get_dialog_force(dialog_id, "add_pending_channel_update 2");
@ -7977,14 +7977,14 @@ void MessagesManager::add_pending_channel_update(DialogId dialog_id, tl_object_p
}
if (new_pts <= pts && new_pts >= pts - 19999) {
LOG(INFO) << "There is no need to process an update with pts " << new_pts << " in " << dialog_id << " with pts "
LOG(INFO) << "There is no need to process an update with PTS " << new_pts << " in " << dialog_id << " with PTS "
<< pts;
promise.set_value(Unit());
return;
}
if (new_pts > pts && pts != new_pts - pts_count) {
LOG(INFO) << "Found a gap in unknown " << dialog_id << " with pts = " << pts << ". new_pts = " << new_pts
LOG(INFO) << "Found a gap in unknown " << dialog_id << " with PTS = " << pts << ". new_pts = " << new_pts
<< ", pts_count = " << pts_count << " in update from " << source;
add_postponed_channel_update(dialog_id, std::move(update), new_pts, pts_count, std::move(promise));
get_channel_difference(dialog_id, pts, true, "add_pending_channel_update 3");
@ -8049,12 +8049,12 @@ void MessagesManager::add_pending_channel_update(DialogId dialog_id, tl_object_p
}
if (old_pts != new_pts - pts_count) {
LOG(INFO) << "Found a gap in the " << dialog_id << " with pts = " << old_pts << ". new_pts = " << new_pts
LOG(INFO) << "Found a gap in the " << dialog_id << " with PTS = " << old_pts << ". new_pts = " << new_pts
<< ", pts_count = " << pts_count << " in update from " << source;
if (d->was_opened || td_->contacts_manager_->get_channel_status(channel_id).is_member() ||
is_dialog_sponsored(d)) {
add_postponed_channel_update(dialog_id, std::move(update), new_pts, pts_count, std::move(promise));
get_channel_difference(dialog_id, old_pts, true, "add_pending_channel_update pts mismatch");
get_channel_difference(dialog_id, old_pts, true, "add_pending_channel_update PTS mismatch");
} else {
promise.set_value(Unit());
}
@ -9766,7 +9766,7 @@ void MessagesManager::after_get_difference() {
for (auto &it : update_message_ids_) {
// there can be unhandled updateMessageId updates after getDifference even for ordinary chats,
// because despite updates coming during getDifference have already been applied,
// some of them could be postponed because of pts gap
// some of them could be postponed because of PTS gap
auto full_message_id = it.first;
auto dialog_id = full_message_id.get_dialog_id();
auto message_id = full_message_id.get_message_id();
@ -12735,7 +12735,7 @@ void MessagesManager::read_history_outbox(DialogId dialog_id, MessageId max_mess
return;
}
// it is impossible for just sent outgoing messages because updates are ordered by pts
// it is impossible for just sent outgoing messages because updates are ordered by PTS
if (d->last_new_message_id.is_valid() && max_message_id > d->last_new_message_id &&
dialog_id.get_type() != DialogType::Channel) {
LOG(INFO) << "Receive read outbox update about unknown " << max_message_id << " in " << dialog_id
@ -13690,8 +13690,6 @@ void MessagesManager::init() {
td_->notification_settings_manager_->init(); // load scope notification settings
init_stickers_manager(td_); // load available reactions
always_wait_for_mailbox();
start_time_ = Time::now();
last_channel_pts_jump_warning_time_ = start_time_ - 3600;
@ -14842,7 +14840,7 @@ std::pair<DialogId, unique_ptr<MessagesManager::Message>> MessagesManager::creat
is_outgoing = supposed_to_be_outgoing;
/*
// it is useless to call getChannelDifference, because the channel pts will be increased already
// it is useless to call getChannelDifference, because the channel PTS will be increased already
if (dialog_type == DialogType::Channel && !running_get_difference_ && !running_get_channel_difference(dialog_id) &&
get_channel_difference_to_log_event_id_.count(dialog_id) == 0) {
// it is safer to completely ignore the message and re-get it through getChannelDifference
@ -15933,16 +15931,16 @@ void MessagesManager::on_get_dialogs(FolderId folder_id, vector<tl_object_ptr<te
case DialogType::User:
case DialogType::Chat:
if (has_pts) {
LOG(ERROR) << "Receive user or group " << dialog_id << " with pts";
LOG(ERROR) << "Receive user or group " << dialog_id << " with PTS";
return promise.set_error(
Status::Error(500, "Wrong query result returned: receive user or basic group chat with pts"));
Status::Error(500, "Wrong query result returned: receive user or basic group chat with PTS"));
}
break;
case DialogType::Channel:
if (!has_pts) {
LOG(ERROR) << "Receive channel " << dialog_id << " without pts";
LOG(ERROR) << "Receive channel " << dialog_id << " without PTS";
return promise.set_error(
Status::Error(500, "Wrong query result returned: receive supergroup chat without pts"));
Status::Error(500, "Wrong query result returned: receive supergroup chat without PTS"));
}
break;
case DialogType::SecretChat:
@ -18676,13 +18674,17 @@ void MessagesManager::get_message_thread(DialogId dialog_id, MessageId message_i
return promise.set_error(Status::Error(400, "Scheduled messages can't have message threads"));
}
FullMessageId top_thread_full_message_id;
if (message_id == MessageId(ServerMessageId(1)) && is_forum_channel(dialog_id)) {
top_thread_full_message_id = FullMessageId{dialog_id, message_id};
} else {
message_id = get_persistent_message_id(d, message_id);
auto m = get_message_force(d, message_id, "get_message_thread");
if (m == nullptr) {
return promise.set_error(Status::Error(400, "Message not found"));
}
TRY_RESULT_PROMISE(promise, top_thread_full_message_id, get_top_thread_full_message_id(dialog_id, m, true));
TRY_RESULT_PROMISE_ASSIGN(promise, top_thread_full_message_id, get_top_thread_full_message_id(dialog_id, m, true));
if ((m->reply_info.is_empty() || !m->reply_info.is_comment_) &&
top_thread_full_message_id.get_message_id() != m->message_id) {
CHECK(dialog_id == top_thread_full_message_id.get_dialog_id());
@ -18690,6 +18692,7 @@ void MessagesManager::get_message_thread(DialogId dialog_id, MessageId message_i
message_id = top_thread_full_message_id.get_message_id();
CHECK(message_id.is_valid());
}
}
auto query_promise = PromiseCreator::lambda([actor_id = actor_id(this), dialog_id, message_id,
promise = std::move(promise)](Result<MessageThreadInfo> result) mutable {
@ -18801,6 +18804,9 @@ void MessagesManager::on_get_discussion_message(DialogId dialog_id, MessageId me
return promise.set_error(Status::Error(400, "Message has no comments"));
}
expected_dialog_id = DialogId(m->reply_info.channel_id_);
} else if (message_id == MessageId(ServerMessageId(1)) && is_forum_channel(dialog_id)) {
// General forum topic
expected_dialog_id = dialog_id;
} else {
if (!m->top_thread_message_id.is_valid()) {
return promise.set_error(Status::Error(400, "Message has no thread"));
@ -18854,6 +18860,9 @@ td_api::object_ptr<td_api::messageThreadInfo> MessagesManager::get_message_threa
}
if (messages.size() != 1) {
is_forum_topic = false;
} else if (info.message_ids[0] == MessageId(ServerMessageId(1)) && is_forum_channel(info.dialog_id)) {
// General forum topic
is_forum_topic = true;
}
if (reply_info == nullptr && !is_forum_topic) {
return nullptr;
@ -24092,7 +24101,7 @@ void MessagesManager::on_get_affected_history(DialogId dialog_id, AffectedHistor
Promise<Unit> &&promise) {
TRY_STATUS_PROMISE(promise, G()->close_status());
LOG(INFO) << "Receive " << (affected_history.is_final_ ? "final " : "partial ")
<< "affected history with pts = " << affected_history.pts_
<< "affected history with PTS = " << affected_history.pts_
<< " and pts_count = " << affected_history.pts_count_;
if (affected_history.pts_count_ > 0) {
@ -25920,7 +25929,7 @@ MessageId MessagesManager::get_persistent_message_id(const Dialog *d, MessageId
return MessageId();
}
if (message_id.is_yet_unsent()) {
// it is possible that user tries to do something with an already sent message by its temporary id
// it is possible that user tries to do something with an already sent message by its temporary identifier
// we need to use real message in this case and transparently replace message_id
auto it = d->yet_unsent_message_id_to_persistent_message_id.find(message_id);
if (it != d->yet_unsent_message_id_to_persistent_message_id.end()) {
@ -28066,8 +28075,8 @@ void MessagesManager::on_message_media_edited(DialogId dialog_id, MessageId mess
// updateMessageContent was already sent and needs to be sent again,
// only if 'i' and 't' sizes from edited_content were added to the photo
auto pts = result.ok();
LOG(INFO) << "Successfully edited " << message_id << " in " << dialog_id << " with pts = " << pts
<< " and last edit pts = " << m->last_edit_pts;
LOG(INFO) << "Successfully edited " << message_id << " in " << dialog_id << " with PTS = " << pts
<< " and last edit PTS = " << m->last_edit_pts;
std::swap(m->content, m->edited_content);
bool need_send_update_message_content = m->edited_content->get_type() == MessageContentType::Photo &&
m->content->get_type() == MessageContentType::Photo;
@ -32132,7 +32141,7 @@ FullMessageId MessagesManager::on_send_message_success(int64 random_id, MessageI
auto it = being_sent_messages_.find(random_id);
if (it == being_sent_messages_.end()) {
LOG(ERROR) << "Result from sendMessage for " << new_message_id << " with random_id " << random_id << " sent at "
<< date << " comes from " << source << " after updateNewMessageId, but was not discarded by pts";
<< date << " comes from " << source << " after updateNewMessageId, but was not discarded by PTS";
return {};
}
@ -39076,7 +39085,7 @@ int32 MessagesManager::load_channel_pts(DialogId dialog_id) const {
return 0;
}
auto pts = to_integer<int32>(G()->td_db()->get_binlog_pmc()->get(get_channel_pts_key(dialog_id)));
LOG(INFO) << "Load " << dialog_id << " pts = " << pts;
LOG(INFO) << "Load " << dialog_id << " PTS = " << pts;
return pts;
}
@ -39085,7 +39094,7 @@ void MessagesManager::set_channel_pts(Dialog *d, int32 new_pts, const char *sour
CHECK(d->dialog_id.get_type() == DialogType::Channel);
LOG_IF(ERROR, running_get_channel_difference(d->dialog_id))
<< "Set pts of " << d->dialog_id << " to " << new_pts << " from " << source
<< "Set PTS of " << d->dialog_id << " to " << new_pts << " from " << source
<< " while running getChannelDifference";
if (is_debug_message_op_enabled()) {
@ -39094,7 +39103,7 @@ void MessagesManager::set_channel_pts(Dialog *d, int32 new_pts, const char *sour
// TODO delete_first_messages support in channels
if (new_pts == std::numeric_limits<int32>::max()) {
LOG(ERROR) << "Update " << d->dialog_id << " pts to -1 from " << source;
LOG(ERROR) << "Update " << d->dialog_id << " PTS to -1 from " << source;
G()->td_db()->get_binlog_pmc()->erase(get_channel_pts_key(d->dialog_id));
d->pts = std::numeric_limits<int32>::max();
if (d->pending_read_channel_inbox_pts != 0) {
@ -39102,12 +39111,12 @@ void MessagesManager::set_channel_pts(Dialog *d, int32 new_pts, const char *sour
}
return;
}
if (new_pts > d->pts || (0 < new_pts && new_pts < d->pts - 99999)) { // pts can only go up or drop cardinally
if (new_pts > d->pts || (0 < new_pts && new_pts < d->pts - 99999)) { // PTS can only go up or drop cardinally
if (new_pts < d->pts - 99999) {
LOG(WARNING) << "Pts of " << d->dialog_id << " decreases from " << d->pts << " to " << new_pts << " from "
LOG(WARNING) << "PTS of " << d->dialog_id << " decreases from " << d->pts << " to " << new_pts << " from "
<< source;
} else {
LOG(INFO) << "Update " << d->dialog_id << " pts to " << new_pts << " from " << source;
LOG(INFO) << "Update " << d->dialog_id << " PTS to " << new_pts << " from " << source;
}
d->pts = new_pts;
@ -39126,7 +39135,7 @@ void MessagesManager::set_channel_pts(Dialog *d, int32 new_pts, const char *sour
G()->td_db()->get_binlog_pmc()->set(get_channel_pts_key(d->dialog_id), to_string(new_pts));
}
} else if (new_pts < d->pts) {
LOG(ERROR) << "Receive wrong pts " << new_pts << " in " << d->dialog_id << " from " << source << ". Current pts is "
LOG(ERROR) << "Receive wrong PTS " << new_pts << " in " << d->dialog_id << " from " << source << ". Current PTS is "
<< d->pts;
}
}
@ -39282,7 +39291,7 @@ void MessagesManager::do_get_channel_difference(DialogId dialog_id, int32 pts, b
limit = MIN_CHANNEL_DIFFERENCE;
}
LOG(INFO) << "-----BEGIN GET CHANNEL DIFFERENCE----- for " << dialog_id << " with pts " << pts << " and limit "
LOG(INFO) << "-----BEGIN GET CHANNEL DIFFERENCE----- for " << dialog_id << " with PTS " << pts << " and limit "
<< limit << " from " << source;
td_->create_handler<GetChannelDifferenceQuery>()->send(dialog_id, std::move(input_channel), pts, limit, force);
@ -39604,7 +39613,7 @@ void MessagesManager::on_get_channel_difference(
channel_get_difference_retry_timeouts_.erase(dialog_id);
LOG(INFO) << "Receive result of getChannelDifference for " << dialog_id << " with pts = " << request_pts
LOG(INFO) << "Receive result of getChannelDifference for " << dialog_id << " with PTS = " << request_pts
<< " and limit = " << request_limit << " from " << source << ": " << to_string(difference_ptr);
bool have_new_messages = false;
@ -39658,18 +39667,18 @@ void MessagesManager::on_get_channel_difference(
int32 flags = difference->flags_;
is_final = (flags & CHANNEL_DIFFERENCE_FLAG_IS_FINAL) != 0;
LOG_IF(ERROR, !is_final) << "Receive channelDifferenceEmpty as result of getChannelDifference from " << source
<< " with pts = " << request_pts << " and limit = " << request_limit << " in "
<< " with PTS = " << request_pts << " and limit = " << request_limit << " in "
<< dialog_id << ", but it is not final";
if (flags & CHANNEL_DIFFERENCE_FLAG_HAS_TIMEOUT) {
timeout = difference->timeout_;
}
// bots can receive channelDifferenceEmpty with pts bigger than known pts
// bots can receive channelDifferenceEmpty with PTS bigger than known PTS
// also, this can happen for deleted channels
if (request_pts != difference->pts_ && !td_->auth_manager_->is_bot() &&
have_input_peer(dialog_id, AccessRights::Read)) {
LOG(ERROR) << "Receive channelDifferenceEmpty as result of getChannelDifference from " << source
<< " with pts = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< " with PTS = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< ", but PTS has changed to " << difference->pts_;
}
set_channel_pts(d, difference->pts_, "channel difference empty");
@ -39687,7 +39696,7 @@ void MessagesManager::on_get_channel_difference(
auto new_pts = difference->pts_;
if (request_pts >= new_pts && request_pts > 1 && (request_pts > new_pts || !td_->auth_manager_->is_bot())) {
LOG(ERROR) << "Receive channelDifference as result of getChannelDifference from " << source
<< " with pts = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< " with PTS = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< ", but PTS has changed from " << d->pts << " to " << new_pts
<< ". Difference: " << oneline(to_string(difference));
new_pts = request_pts + 1;
@ -39700,7 +39709,7 @@ void MessagesManager::on_get_channel_difference(
auto message_id = MessageId::get_message_id(message, false);
if (message_id <= cur_message_id) {
LOG(ERROR) << "Receive " << cur_message_id << " after " << message_id << " in channelDifference of "
<< dialog_id << " from " << source << " with pts " << request_pts << " and limit "
<< dialog_id << " from " << source << " with PTS " << request_pts << " and limit "
<< request_limit << ": " << to_string(difference);
after_get_channel_difference(dialog_id, false);
return;
@ -39738,7 +39747,7 @@ void MessagesManager::on_get_channel_difference(
CHECK(dialog != nullptr);
if ((dialog->flags_ & telegram_api::dialog::PTS_MASK) == 0) {
LOG(ERROR) << "Receive " << dialog_id << " without pts";
LOG(ERROR) << "Receive " << dialog_id << " without PTS";
return after_get_channel_difference(dialog_id, false);
}
@ -39751,7 +39760,7 @@ void MessagesManager::on_get_channel_difference(
auto new_pts = dialog->pts_;
if (request_pts > new_pts - request_limit) {
LOG(ERROR) << "Receive channelDifferenceTooLong as result of getChannelDifference from " << source
<< " with pts = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< " with PTS = " << request_pts << " and limit = " << request_limit << " in " << dialog_id
<< ", but PTS has changed from " << d->pts << " to " << new_pts
<< ". Difference: " << oneline(to_string(difference));
if (request_pts >= new_pts) {
@ -39848,13 +39857,13 @@ void MessagesManager::after_get_channel_difference(DialogId dialog_id, bool succ
}
if (updates.size() != old_size || running_get_channel_difference(dialog_id)) {
if (success && update_pts - 10000 < pts && update_pts_count == 1) {
// if getChannelDifference was successful and update pts is near channel pts,
// if getChannelDifference was successful and update PTS is near channel PTS,
// we hope that the update eventually can be applied
LOG(INFO) << "Can't apply postponed channel updates";
} else {
// otherwise protect from getChannelDifference repeating calls by dropping postponed updates
LOG(WARNING) << "Failed to apply postponed updates of type " << update_id << " in " << dialog_id
<< " with pts " << pts << ", update pts is " << update_pts << ", update pts count is "
<< " with PTS " << pts << ", update PTS is " << update_pts << ", update PTS count is "
<< update_pts_count;
vector<Promise<Unit>> update_promises;
for (auto &postponed_update : updates) {

View File

@ -1600,7 +1600,7 @@ class MessagesManager final : public Actor {
protected:
MessagesIteratorBase() = default;
// points iterator to message with greatest id which is less or equal than message_id
// points iterator to message with greatest identifier which is less or equal than message_id
MessagesIteratorBase(const Message *root, MessageId message_id) {
size_t last_right_pos = 0;
while (root != nullptr) {

View File

@ -62,19 +62,10 @@ OptionManager::OptionManager(Td *td)
if (!is_internal_option(name)) {
send_closure(G()->td(), &Td::send_update,
td_api::make_object<td_api::updateOption>(name, get_option_value_object(name_value.second)));
} else if (name == "otherwise_relogin_days") {
auto days = narrow_cast<int32>(get_option_integer(name));
if (days > 0) {
vector<SuggestedAction> added_actions{SuggestedAction{SuggestedAction::Type::SetPassword, DialogId(), days}};
send_closure(G()->td(), &Td::send_update, get_update_suggested_actions_object(added_actions, {}));
}
} else if (name == "default_reaction") {
auto value = get_option_string(name);
if (value.empty()) {
// legacy
set_option_empty(name);
} else {
send_update_default_reaction_type(value);
auto update = get_internal_option_update(name);
if (update != nullptr) {
send_closure(G()->td(), &Td::send_update, std::move(update));
}
}
}
@ -198,7 +189,7 @@ void OptionManager::set_option(Slice name, Slice value) {
CHECK(!name.empty());
CHECK(Scheduler::instance()->sched_id() == current_scheduler_id_);
if (value.empty()) {
if (option_pmc_->erase(name.str()) == 0) {
if (options_->erase(name.str()) == 0) {
return;
}
option_pmc_->erase(name.str());
@ -216,6 +207,11 @@ void OptionManager::set_option(Slice name, Slice value) {
if (!is_internal_option(name)) {
send_closure(G()->td(), &Td::send_update,
td_api::make_object<td_api::updateOption>(name.str(), get_option_value_object(get_option(name))));
} else {
auto update = get_internal_option_update(name);
if (update != nullptr) {
send_closure(G()->td(), &Td::send_update, std::move(update));
}
}
}
@ -299,6 +295,20 @@ bool OptionManager::is_internal_option(Slice name) {
}
}
td_api::object_ptr<td_api::Update> OptionManager::get_internal_option_update(Slice name) const {
if (name == "default_reaction") {
return get_update_default_reaction_type(get_option_string(name));
}
if (name == "otherwise_relogin_days") {
auto days = narrow_cast<int32>(get_option_integer(name));
if (days > 0) {
vector<SuggestedAction> added_actions{SuggestedAction{SuggestedAction::Type::SetPassword, DialogId(), days}};
return get_update_suggested_actions_object(added_actions, {});
}
}
return nullptr;
}
const vector<Slice> &OptionManager::get_synchronous_options() {
static const vector<Slice> options{"version", "commit_hash"};
return options;
@ -334,9 +344,6 @@ void OptionManager::on_option_updated(Slice name) {
}
break;
case 'd':
if (name == "default_reaction") {
send_update_default_reaction_type(get_option_string(name));
}
if (name == "dice_emojis") {
send_closure(td_->stickers_manager_actor_, &StickersManager::on_update_dice_emojis);
}
@ -414,13 +421,6 @@ void OptionManager::on_option_updated(Slice name) {
if (name == "online_cloud_timeout_ms") {
send_closure(td_->notification_manager_actor_, &NotificationManager::on_online_cloud_timeout_changed);
}
if (name == "otherwise_relogin_days") {
auto days = narrow_cast<int32>(get_option_integer(name));
if (days > 0) {
vector<SuggestedAction> added_actions{SuggestedAction{SuggestedAction::Type::SetPassword, DialogId(), days}};
send_closure(G()->td(), &Td::send_update, get_update_suggested_actions_object(added_actions, {}));
}
}
break;
case 'r':
if (name == "rating_e_decay") {
@ -900,6 +900,11 @@ void OptionManager::get_current_state(vector<td_api::object_ptr<td_api::Update>>
if (!is_internal_option(option.first)) {
updates.push_back(
td_api::make_object<td_api::updateOption>(option.first, get_option_value_object(option.second)));
} else {
auto update = get_internal_option_update(option.first);
if (update != nullptr) {
updates.push_back(std::move(update));
}
}
}
}

View File

@ -72,6 +72,8 @@ class OptionManager {
static bool is_internal_option(Slice name);
td_api::object_ptr<td_api::Update> get_internal_option_update(Slice name) const;
static const vector<Slice> &get_synchronous_options();
static td_api::object_ptr<td_api::OptionValue> get_unix_time_option_value_object();

View File

@ -12,7 +12,7 @@
namespace td {
// It is not about handling gaps.
// It is about finding mem processed pts.
// It is about finding mem processed PTS.
// All checks must be done before.
class PtsManager {

View File

@ -0,0 +1,82 @@
//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2023
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include "td/telegram/QueryMerger.h"
#include "td/utils/logging.h"
#include "td/utils/Time.h"
namespace td {
QueryMerger::QueryMerger(Slice name, size_t max_concurrent_query_count, size_t max_merged_query_count)
: max_concurrent_query_count_(max_concurrent_query_count), max_merged_query_count_(max_merged_query_count) {
register_actor(name, this).release();
}
void QueryMerger::add_query(int64 query_id, Promise<Unit> &&promise) {
LOG(INFO) << "Add query " << query_id << " with" << (promise ? "" : "out") << " promise";
CHECK(query_id != 0);
auto &query = queries_[query_id];
query.promises_.push_back(std::move(promise));
if (query.promises_.size() != 1) {
// duplicate query, just wait
return;
}
pending_queries_.push(query_id);
loop();
}
void QueryMerger::send_query(vector<int64> query_ids) {
CHECK(merge_function_ != nullptr);
LOG(INFO) << "Send queries " << query_ids;
query_count_++;
merge_function_(query_ids, PromiseCreator::lambda([actor_id = actor_id(this), query_ids](Result<Unit> &&result) {
send_closure(actor_id, &QueryMerger::on_get_query_result, std::move(query_ids), std::move(result));
}));
}
void QueryMerger::on_get_query_result(vector<int64> query_ids, Result<Unit> &&result) {
LOG(INFO) << "Get result of queries " << query_ids << (result.is_error() ? " error" : " success");
query_count_--;
for (auto query_id : query_ids) {
auto it = queries_.find(query_id);
CHECK(it != queries_.end());
auto promises = std::move(it->second.promises_);
queries_.erase(it);
if (result.is_ok()) {
set_promises(promises);
} else {
fail_promises(promises, result.move_as_error());
}
}
loop();
}
void QueryMerger::loop() {
if (query_count_ == max_concurrent_query_count_) {
return;
}
vector<int64> query_ids;
while (!pending_queries_.empty()) {
auto query_id = pending_queries_.front();
pending_queries_.pop();
query_ids.push_back(query_id);
if (query_ids.size() == max_merged_query_count_) {
send_query(std::move(query_ids));
query_ids.clear();
if (query_count_ == max_concurrent_query_count_) {
break;
}
}
}
if (!query_ids.empty()) {
send_query(std::move(query_ids));
}
}
} // namespace td

td/telegram/QueryMerger.h (new file, 54 lines)
View File

@ -0,0 +1,54 @@
//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2023
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#pragma once
#include "td/actor/actor.h"
#include "td/utils/common.h"
#include "td/utils/FlatHashMap.h"
#include "td/utils/Promise.h"
#include "td/utils/Slice.h"
#include "td/utils/Status.h"
#include <functional>
#include <queue>
namespace td {
// merges queries into a single request
class QueryMerger final : public Actor {
public:
QueryMerger(Slice name, size_t max_concurrent_query_count, size_t max_merged_query_count);
using MergeFunction = std::function<void(vector<int64> query_ids, Promise<Unit> &&promise)>;
void set_merge_function(MergeFunction merge_function) {
merge_function_ = std::move(merge_function);
}
void add_query(int64 query_id, Promise<Unit> &&promise);
private:
struct QueryInfo {
vector<Promise<Unit>> promises_;
};
size_t query_count_ = 0;
size_t max_concurrent_query_count_;
size_t max_merged_query_count_;
MergeFunction merge_function_;
std::queue<int64> pending_queries_;
FlatHashMap<int64, QueryInfo> queries_;
void send_query(vector<int64> query_ids);
void on_get_query_result(vector<int64> query_ids, Result<Unit> &&result);
void loop() final;
};
} // namespace td
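
As a minimal usage sketch of the new QueryMerger (illustration only; the member name, the merge callback body, and load_thing() below are hypothetical, while the constructor arguments mirror the ContactsManager wiring earlier in this commit):

// At most 3 requests in flight, up to 50 query identifiers merged into each request.
QueryMerger get_thing_queries_{"GetThingMerger", 3, 50};  // hypothetical member of the owning class

void set_up_merger() {
  // The merge function receives a batch of pending query identifiers and a promise
  // that completes (or fails) all of them once the combined request finishes.
  get_thing_queries_.set_merge_function([](vector<int64> query_ids, Promise<Unit> &&promise) {
    // send_combined_request(std::move(query_ids), std::move(promise));  // hypothetical network call
    promise.set_value(Unit());  // placeholder so the sketch is self-contained
  });
}

void load_thing(int64 thing_id, Promise<Unit> &&promise) {
  // Duplicate identifiers are coalesced into one pending query; every promise registered
  // for the same identifier is resolved together when its batch completes.
  get_thing_queries_.add_query(thing_id, std::move(promise));
}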

View File

@ -1158,7 +1158,7 @@ void SecretChatActor::do_inbound_message_decrypted_pending(unique_ptr<log_event:
// Just save log event if necessary
auto log_event_id = message->log_event_id();
// qts
// QTS
auto qts_promise = std::move(message->promise);
if (log_event_id == 0) {
@ -1185,9 +1185,9 @@ Status SecretChatActor::do_inbound_message_decrypted(unique_ptr<log_event::Inbou
// 3. [save_log_event] => Add message to MessageManager [save_message]
// Note: if we are able to add message by random_id, we may not wait for (log event). Otherwise, we should force
// binlog flush.
// 4. [save_log_event] => Update qts [qts]
// 4. [save_log_event] => Update QTS [qts]
// 5. [save_changes; save_message; ?qts) => Remove log event [remove_log_event]
// Note: It is easier not to wait for qts. In the worst case old update will be handled again after restart.
// Note: It is easier not to wait for QTS. In the worst case old update will be handled again after restart.
auto state_id = inbound_message_states_.create();
InboundMessageState &state = *inbound_message_states_.get(state_id);
@ -1236,7 +1236,7 @@ Status SecretChatActor::do_inbound_message_decrypted(unique_ptr<log_event::Inbou
on_pfs_state_changed();
}
// qts
// QTS
auto qts_promise = std::move(message->promise);
// process message

View File

@ -111,7 +111,7 @@ class SecretChatActor final : public NetQueryCallback {
void cancel_chat(bool delete_history, bool is_already_discarded, Promise<> promise);
// Inbound messages
// Logevent is created by SecretChatsManager, because it must contain qts
// Logevent is created by SecretChatsManager, because it must contain QTS
void add_inbound_message(unique_ptr<log_event::InboundSecretMessage> message);
// Outbound messages
@ -479,7 +479,7 @@ class SecretChatActor final : public NetQueryCallback {
// This is completly flawed.
// (A-start_save_to_binlog ----> B-start_save_to_binlog+change_memory ----> A-finish_save_to_binlog+surprise)
//
// Instead, I suggest general solution that is already used with SeqNoState and qts
// Instead, I suggest general solution that is already used with SeqNoState and QTS
// 1. We APPLY CHANGE to memory immediately AFTER corresponding EVENT is SENT to the binlog.
// 2. We SEND CHANGE to database only after corresponding EVENT is SAVED to the binlog.
// 3. Then, we are able to ERASE EVENT just AFTER the CHANGE is SAVED to the binlog.

View File

@ -68,6 +68,7 @@ void StateManager::add_callback(unique_ptr<Callback> callback) {
callbacks_.push_back(std::move(callback));
}
}
void StateManager::wait_first_sync(Promise<> promise) {
if (was_sync_) {
return promise.set_value(Unit());

View File

@ -3174,7 +3174,7 @@ StickerSetId StickersManager::on_get_input_sticker_set(FileId sticker_file_id,
}));
}
// always return empty StickerSetId, because we can't trust the set_id provided by the peer in the secret chat
// the real sticker set id will be set in on_get_sticker if and only if the sticker is really from the set
// the real sticker set identifier will be set in on_get_sticker if and only if the sticker is really from the set
return StickerSetId();
}
case telegram_api::inputStickerSetAnimatedEmoji::ID:
@ -4707,6 +4707,7 @@ void StickersManager::search_stickers(string emoji, int32 limit,
void StickersManager::on_load_found_stickers_from_database(string emoji, string value) {
if (G()->close_flag()) {
on_search_stickers_failed(emoji, G()->close_status());
return;
}
if (value.empty()) {
@ -4744,6 +4745,18 @@ void StickersManager::on_search_stickers_finished(const string &emoji, const Fou
}
}
void StickersManager::on_search_stickers_failed(const string &emoji, Status &&error) {
auto it = search_stickers_queries_.find(emoji);
CHECK(it != search_stickers_queries_.end());
CHECK(!it->second.empty());
auto queries = std::move(it->second);
search_stickers_queries_.erase(it);
for (auto &query : queries) {
query.second.set_error(error.clone());
}
}
void StickersManager::on_find_stickers_success(const string &emoji,
tl_object_ptr<telegram_api::messages_Stickers> &&stickers) {
CHECK(stickers != nullptr);
@ -4791,15 +4804,7 @@ void StickersManager::on_find_stickers_fail(const string &emoji, Status &&error)
return on_find_stickers_success(emoji, make_tl_object<telegram_api::messages_stickersNotModified>());
}
auto it = search_stickers_queries_.find(emoji);
CHECK(it != search_stickers_queries_.end());
CHECK(!it->second.empty());
auto queries = std::move(it->second);
search_stickers_queries_.erase(it);
for (auto &query : queries) {
query.second.set_error(error.clone());
}
on_search_stickers_failed(emoji, std::move(error));
}
void StickersManager::get_premium_stickers(int32 limit, Promise<td_api::object_ptr<td_api::stickers>> &&promise) {
@ -5162,6 +5167,7 @@ void StickersManager::load_installed_sticker_sets(StickerType sticker_type, Prom
void StickersManager::on_load_installed_sticker_sets_from_database(StickerType sticker_type, string value) {
if (G()->close_flag()) {
on_get_installed_sticker_sets_failed(sticker_type, G()->close_status());
return;
}
if (value.empty()) {
@ -6134,7 +6140,14 @@ void StickersManager::load_custom_emoji_sticker_from_database(CustomEmojiId cust
}
void StickersManager::on_load_custom_emoji_from_database(CustomEmojiId custom_emoji_id, string value) {
auto it = custom_emoji_load_queries_.find(custom_emoji_id);
CHECK(it != custom_emoji_load_queries_.end());
CHECK(!it->second.empty());
auto promises = std::move(it->second);
custom_emoji_load_queries_.erase(it);
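// The waiting promises are extracted up front so that every exit path, including the client shutdown check below, completes them.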
if (G()->close_flag()) {
fail_promises(promises, G()->close_status());
return;
}
@ -6149,12 +6162,6 @@ void StickersManager::on_load_custom_emoji_from_database(CustomEmojiId custom_em
LOG(INFO) << "Failed to load " << custom_emoji_id << " from database";
}
auto it = custom_emoji_load_queries_.find(custom_emoji_id);
CHECK(it != custom_emoji_load_queries_.end());
CHECK(!it->second.empty());
auto promises = std::move(it->second);
custom_emoji_load_queries_.erase(it);
set_promises(promises);
}
@ -8377,6 +8384,7 @@ void StickersManager::load_recent_stickers(bool is_attached, Promise<Unit> &&pro
void StickersManager::on_load_recent_stickers_from_database(bool is_attached, string value) {
if (G()->close_flag()) {
fail_promises(load_recent_stickers_queries_[is_attached], G()->close_status());
return;
}
if (value.empty()) {
@ -8777,6 +8785,7 @@ void StickersManager::load_favorite_stickers(Promise<Unit> &&promise) {
void StickersManager::on_load_favorite_stickers_from_database(const string &value) {
if (G()->close_flag()) {
fail_promises(load_favorite_stickers_queries_, G()->close_status());
return;
}
if (value.empty()) {

View File

@ -629,6 +629,8 @@ class StickersManager final : public Actor {
void on_search_stickers_finished(const string &emoji, const FoundStickers &found_stickers);
void on_search_stickers_failed(const string &emoji, Status &&error);
static string get_custom_emoji_database_key(CustomEmojiId custom_emoji_id);
void load_custom_emoji_sticker_from_database_force(CustomEmojiId custom_emoji_id);
@ -968,7 +970,8 @@ class StickersManager final : public Actor {
bool is_inited_ = false;
WaitFreeHashMap<FileId, unique_ptr<Sticker>, FileIdHash> stickers_; // file_id -> Sticker
WaitFreeHashMap<StickerSetId, unique_ptr<StickerSet>, StickerSetIdHash> sticker_sets_; // id -> StickerSet
WaitFreeHashMap<StickerSetId, unique_ptr<StickerSet>, StickerSetIdHash>
sticker_sets_; // sticker_set_id -> StickerSet
WaitFreeHashMap<string, StickerSetId> short_name_to_sticker_set_id_;
vector<StickerSetId> installed_sticker_set_ids_[MAX_STICKER_TYPE];

View File

@ -3119,8 +3119,6 @@ void Td::on_connection_state_changed(ConnectionState new_state) {
}
void Td::start_up() {
always_wait_for_mailbox();
uint64 check_endianness = 0x0706050403020100;
auto check_endianness_raw = reinterpret_cast<const unsigned char *>(&check_endianness);
for (unsigned char c = 0; c < 8; c++) {
@ -3486,7 +3484,7 @@ void Td::close_impl(bool destroy_flag) {
close_flag_ = 1;
G()->set_close_flag();
send_closure(auth_manager_actor_, &AuthManager::on_closing, destroy_flag);
updates_manager_->timeout_expired(); // save pts and qts
updates_manager_->timeout_expired(); // save PTS and QTS
// wait till all request_actors will stop
request_actors_.clear();

View File

@ -250,7 +250,7 @@ void UpdatesManager::fill_pts_gap(void *td) {
min_pts = min(min_pts, updates_manager->postponed_pts_updates_.begin()->first);
max_pts = max(max_pts, updates_manager->postponed_pts_updates_.rbegin()->first);
}
string source = PSTRING() << "pts from " << updates_manager->get_pts() << " to " << min_pts << '-' << max_pts;
string source = PSTRING() << "PTS from " << updates_manager->get_pts() << " to " << min_pts << '-' << max_pts;
fill_gap(td, source.c_str());
}
@ -284,7 +284,7 @@ void UpdatesManager::fill_qts_gap(void *td) {
min_qts = updates_manager->pending_qts_updates_.begin()->first;
max_qts = updates_manager->pending_qts_updates_.rbegin()->first;
}
string source = PSTRING() << "qts from " << updates_manager->get_qts() << " to " << min_qts << '-' << max_qts;
string source = PSTRING() << "QTS from " << updates_manager->get_qts() << " to " << min_qts << '-' << max_qts;
fill_gap(td, source.c_str());
}
@ -354,7 +354,7 @@ void UpdatesManager::run_get_difference(bool is_recursive, const char *source) {
pts = 0;
}
VLOG(get_difference) << "-----BEGIN GET DIFFERENCE----- from " << source << " with pts = " << pts << ", qts = " << qts
VLOG(get_difference) << "-----BEGIN GET DIFFERENCE----- from " << source << " with PTS = " << pts << ", QTS = " << qts
<< ", date = " << date;
before_get_difference(false);
@ -471,27 +471,27 @@ void UpdatesManager::timeout_expired() {
Promise<> UpdatesManager::set_pts(int32 pts, const char *source) {
if (pts == std::numeric_limits<int32>::max()) {
LOG(WARNING) << "Update pts from " << get_pts() << " to -1 from " << source;
LOG(WARNING) << "Update PTS from " << get_pts() << " to -1 from " << source;
save_pts(pts);
auto result = add_pts(pts);
init_state();
return result;
}
Promise<> result;
if (pts > get_pts() || (0 < pts && pts < get_pts() - 399999)) { // pts can only go up or drop cardinally
if (pts > get_pts() || (0 < pts && pts < get_pts() - 399999)) { // PTS can only go up or drop cardinally
if (pts < get_pts() - 399999) {
LOG(WARNING) << "PTS decreases from " << get_pts() << " to " << pts << " from " << source;
} else {
LOG(INFO) << "Update pts from " << get_pts() << " to " << pts << " from " << source;
LOG(INFO) << "Update PTS from " << get_pts() << " to " << pts << " from " << source;
}
result = add_pts(pts);
if (last_get_difference_pts_ < get_pts() - FORCED_GET_DIFFERENCE_PTS_DIFF) {
last_get_difference_pts_ = get_pts();
schedule_get_difference("rare pts getDifference");
schedule_get_difference("rare PTS getDifference");
}
} else if (pts < get_pts() && (pts > 1 || td_->option_manager_->get_option_integer("session_count") <= 1)) {
LOG(ERROR) << "Receive wrong pts = " << pts << " from " << source << ". Current pts = " << get_pts();
LOG(ERROR) << "Receive wrong PTS = " << pts << " from " << source << ". Current PTS = " << get_pts();
}
return result;
}
@ -1070,8 +1070,8 @@ void UpdatesManager::schedule_get_difference(const char *source) {
return;
}
if (!retry_timeout_.has_timeout()) {
LOG(WARNING) << "Schedule getDifference in " << retry_time_ << " seconds with pts = " << get_pts()
<< ", qts = " << get_qts() << ", date = " << get_date() << " from " << source;
LOG(WARNING) << "Schedule getDifference in " << retry_time_ << " seconds with PTS = " << get_pts()
<< ", QTS = " << get_qts() << ", date = " << get_date() << " from " << source;
retry_timeout_.set_callback(std::move(fill_get_difference_gap));
retry_timeout_.set_callback_data(static_cast<void *>(td_));
retry_timeout_.set_timeout_in(retry_time_);
@ -1091,12 +1091,12 @@ void UpdatesManager::on_get_updates_state(tl_object_ptr<telegram_api::updates_st
// TODO use state->unread_count;
if (get_pts() == std::numeric_limits<int32>::max()) {
LOG(WARNING) << "Restore pts to " << state->pts_;
// restoring right pts
LOG(WARNING) << "Restore PTS to " << state->pts_;
// restoring right PTS
CHECK(pending_pts_updates_.empty());
auto real_running_get_difference = running_get_difference_;
running_get_difference_ = false;
process_postponed_pts_updates(); // drop all updates with old pts
process_postponed_pts_updates(); // drop all updates with old PTS
running_get_difference_ = real_running_get_difference;
pts_manager_.init(state->pts_);
last_get_difference_pts_ = get_pts();
@ -1583,7 +1583,7 @@ void UpdatesManager::on_get_difference(tl_object_ptr<telegram_api::updates_Diffe
process_pending_qts_updates();
if (!pending_qts_updates_.empty()) {
LOG(WARNING) << "Drop " << pending_qts_updates_.size() << " pending qts updates after receive empty difference";
LOG(WARNING) << "Drop " << pending_qts_updates_.size() << " pending QTS updates after receive empty difference";
auto pending_qts_updates = std::move(pending_qts_updates_);
pending_qts_updates_.clear();
@ -1747,19 +1747,19 @@ void UpdatesManager::after_get_difference() {
auto begin_time = Time::now();
auto update_count = postponed_updates.size();
VLOG(get_difference) << "Begin to apply " << postponed_updates.size()
<< " postponed pts updates with pts = " << get_pts();
<< " postponed PTS updates with PTS = " << get_pts();
for (auto &postponed_update : postponed_updates) {
auto &update = postponed_update.second;
add_pending_pts_update(std::move(update.update), update.pts, update.pts_count, update.receive_time,
std::move(update.promise), AFTER_GET_DIFFERENCE_SOURCE);
CHECK(!running_get_difference_);
}
VLOG(get_difference) << "After applying postponed pts updates have pts = " << get_pts()
VLOG(get_difference) << "After applying postponed PTS updates have PTS = " << get_pts()
<< ", max_pts = " << accumulated_pts_ << " and " << pending_pts_updates_.size() << " + "
<< postponed_pts_updates_.size() << " pending pts updates";
<< postponed_pts_updates_.size() << " pending PTS updates";
auto passed_time = Time::now() - begin_time;
if (passed_time >= UPDATE_APPLY_WARNING_TIME) {
LOG(WARNING) << "Applied " << update_count << " pts updates in " << passed_time
LOG(WARNING) << "Applied " << update_count << " PTS updates in " << passed_time
<< " seconds after postponing them for " << (Time::now() - get_difference_start_time_) << " seconds";
}
}
@ -2164,16 +2164,16 @@ void UpdatesManager::add_pending_qts_update(tl_object_ptr<telegram_api::Update>
Promise<Unit> &&promise) {
CHECK(update != nullptr);
if (qts <= 1) {
LOG(ERROR) << "Receive wrong qts " << qts << " in " << oneline(to_string(update));
schedule_get_difference("wrong qts");
LOG(ERROR) << "Receive wrong QTS " << qts << " in " << oneline(to_string(update));
schedule_get_difference("wrong QTS");
promise.set_value(Unit());
return;
}
int32 old_qts = get_qts();
LOG(INFO) << "Process update with qts = " << qts << ", current qts = " << old_qts;
LOG(INFO) << "Process update with QTS = " << qts << ", current QTS = " << old_qts;
if (qts < old_qts - 100001) {
LOG(WARNING) << "Restore qts after qts overflow from " << old_qts << " to " << qts << " by "
LOG(WARNING) << "Restore QTS after QTS overflow from " << old_qts << " to " << qts << " by "
<< oneline(to_string(update));
add_qts(qts - 1).set_value(Unit());
CHECK(get_qts() == qts - 1);
@ -2182,19 +2182,19 @@ void UpdatesManager::add_pending_qts_update(tl_object_ptr<telegram_api::Update>
}
if (qts <= old_qts) {
LOG(INFO) << "Skip already applied update with qts = " << qts;
LOG(INFO) << "Skip already applied update with QTS = " << qts;
promise.set_value(Unit());
return;
}
if (running_get_difference_ || (qts - 1 > old_qts && old_qts > 0)) {
LOG(INFO) << "Postpone update with qts = " << qts;
LOG(INFO) << "Postpone update with QTS = " << qts;
if (!running_get_difference_ && pending_qts_updates_.empty()) {
set_qts_gap_timeout(MAX_UNFILLED_GAP_TIME);
}
auto &pending_update = pending_qts_updates_[qts];
if (pending_update.update != nullptr) {
LOG(WARNING) << "Receive duplicate update with qts = " << qts;
LOG(WARNING) << "Receive duplicate update with QTS = " << qts;
} else {
pending_update.receive_time = Time::now();
}
@ -2328,11 +2328,11 @@ void UpdatesManager::process_updates(vector<tl_object_ptr<telegram_api::Update>>
void UpdatesManager::process_pts_update(tl_object_ptr<telegram_api::Update> &&update) {
CHECK(update != nullptr);
// TODO need to save all updates that can change result of running queries not associated with pts (for example
// TODO need to save all updates that can change result of running queries not associated with PTS (for example
// getHistory) and apply the updates to the results of the queries
if (!check_pts_update(update)) {
LOG(ERROR) << "Receive wrong pts update: " << oneline(to_string(update));
LOG(ERROR) << "Receive wrong PTS update: " << oneline(to_string(update));
update = nullptr;
return;
}
@ -2352,16 +2352,16 @@ void UpdatesManager::add_pending_pts_update(tl_object_ptr<telegram_api::Update>
CHECK(source != nullptr);
LOG(INFO) << "Receive from " << source << " pending " << to_string(update);
if (pts_count < 0 || new_pts <= pts_count) {
LOG(ERROR) << "Receive update with wrong pts = " << new_pts << " or pts_count = " << pts_count << " from " << source
LOG(ERROR) << "Receive update with wrong PTS = " << new_pts << " or pts_count = " << pts_count << " from " << source
<< ": " << oneline(to_string(update));
return promise.set_value(Unit());
}
// TODO need to save all updates that can change result of running queries not associated with pts (for example
// TODO need to save all updates that can change result of running queries not associated with PTS (for example
// getHistory) and apply them to the results of these queries
if (!check_pts_update(update)) {
LOG(ERROR) << "Receive wrong pts update from " << source << ": " << oneline(to_string(update));
LOG(ERROR) << "Receive wrong PTS update from " << source << ": " << oneline(to_string(update));
return promise.set_value(Unit());
}
@ -2376,7 +2376,7 @@ void UpdatesManager::add_pending_pts_update(tl_object_ptr<telegram_api::Update>
auto now = Time::now();
if (now > last_pts_jump_warning_time_ + 1 && (need_restore_pts || now < last_pts_jump_warning_time_ + 5) &&
!(old_pts == std::numeric_limits<int32>::max() && running_get_difference_)) {
LOG(ERROR) << "Restore pts after delete_first_messages from " << old_pts << " to " << new_pts
LOG(ERROR) << "Restore PTS after delete_first_messages from " << old_pts << " to " << new_pts
<< " is disabled, pts_count = " << pts_count << ", update is from " << source << ": "
<< oneline(to_string(update));
last_pts_jump_warning_time_ = now;
@ -2385,8 +2385,8 @@ void UpdatesManager::add_pending_pts_update(tl_object_ptr<telegram_api::Update>
set_pts_gap_timeout(0.001);
/*
LOG(WARNING) << "Restore pts after delete_first_messages";
set_pts(new_pts - 1, "restore pts after delete_first_messages");
LOG(WARNING) << "Restore PTS after delete_first_messages";
set_pts(new_pts - 1, "restore PTS after delete_first_messages");
old_pts = get_pts();
CHECK(old_pts == new_pts - 1);
*/
@ -2408,7 +2408,7 @@ void UpdatesManager::add_pending_pts_update(tl_object_ptr<telegram_api::Update>
return;
}
// is_acceptable_update check was skipped for postponed pts updates
// is_acceptable_update check was skipped for postponed PTS updates
if (source == AFTER_GET_DIFFERENCE_SOURCE && !is_acceptable_update(update.get())) {
LOG(INFO) << "Postpone again unacceptable pending update";
postpone_pts_update(std::move(update), new_pts, pts_count, receive_time, std::move(promise));
@ -2507,7 +2507,7 @@ void UpdatesManager::process_qts_update(tl_object_ptr<telegram_api::Update> &&up
LOG(DEBUG) << "Process " << to_string(update_ptr);
if (last_get_difference_qts_ < qts - FORCED_GET_DIFFERENCE_PTS_DIFF) {
if (last_get_difference_qts_ != 0) {
schedule_get_difference("rare qts getDifference");
schedule_get_difference("rare QTS getDifference");
}
last_get_difference_qts_ = qts;
}
@ -2576,7 +2576,7 @@ void UpdatesManager::process_all_pending_pts_updates() {
auto diff = Time::now() - last_pts_gap_time_;
last_pts_gap_time_ = 0;
if (diff > 0.1) {
VLOG(get_difference) << "Gap in pts from " << accumulated_pts_ - accumulated_pts_count_ << " to "
VLOG(get_difference) << "Gap in PTS from " << accumulated_pts_ - accumulated_pts_count_ << " to "
<< accumulated_pts_ << " has been filled in " << begin_diff << '-' << diff << " seconds";
}
}
@ -2631,7 +2631,7 @@ void UpdatesManager::process_postponed_pts_updates() {
if (old_pts > new_pts - pts_count || last_update_it == postponed_pts_updates_.end() ||
i == GAP_TIMEOUT_UPDATE_COUNT) {
// the updates can't be applied
VLOG(get_difference) << "Can't apply " << i << " next postponed updates with pts " << update_it->second.pts
VLOG(get_difference) << "Can't apply " << i << " next postponed updates with PTS " << update_it->second.pts
<< '-' << new_pts << ", because their pts_count is " << pts_count
<< " instead of expected " << new_pts - old_pts;
last_update_it = update_it;
@ -2795,7 +2795,7 @@ void UpdatesManager::process_pending_qts_updates() {
return;
}
LOG(DEBUG) << "Process " << pending_qts_updates_.size() << " pending qts updates";
LOG(DEBUG) << "Process " << pending_qts_updates_.size() << " pending QTS updates";
auto begin_time = Time::now();
auto initial_qts = get_qts();
int32 applied_update_count = 0;

View File

@ -225,7 +225,7 @@ class UpdatesManager final : public Actor {
std::multimap<int32, PendingSeqUpdates> postponed_updates_; // updates received during getDifference
std::multimap<int32, PendingSeqUpdates> pending_seq_updates_; // updates with too big seq
std::map<int32, PendingQtsUpdate> pending_qts_updates_; // updates with too big qts
std::map<int32, PendingQtsUpdate> pending_qts_updates_; // updates with too big QTS
Timeout pts_gap_timeout_;

View File

@ -207,7 +207,7 @@ class WebPagesManager::WebPageInstantView {
friend StringBuilder &operator<<(StringBuilder &string_builder,
const WebPagesManager::WebPageInstantView &instant_view) {
return string_builder << "InstantView(url = " << instant_view.url << ", size = " << instant_view.page_blocks.size()
return string_builder << "InstantView(URL = " << instant_view.url << ", size = " << instant_view.page_blocks.size()
<< ", view_count = " << instant_view.view_count << ", hash = " << instant_view.hash
<< ", is_empty = " << instant_view.is_empty << ", is_v2 = " << instant_view.is_v2
<< ", is_rtl = " << instant_view.is_rtl << ", is_full = " << instant_view.is_full
@ -696,7 +696,7 @@ void WebPagesManager::on_get_web_page_by_url(const string &url, WebPageId web_pa
}
if (cached_web_page_id.is_valid() && web_page_id.is_valid() && web_page_id != cached_web_page_id) {
LOG(ERROR) << "Url \"" << url << "\" preview is changed from " << cached_web_page_id << " to " << web_page_id;
LOG(ERROR) << "URL \"" << url << "\" preview is changed from " << cached_web_page_id << " to " << web_page_id;
}
cached_web_page_id = web_page_id;
@ -781,7 +781,7 @@ void WebPagesManager::on_get_web_page_preview_success(int64 request_id, const st
void WebPagesManager::on_get_web_page_preview_fail(int64 request_id, const string &url, Status error,
Promise<Unit> &&promise) {
LOG(INFO) << "Clean up getting of web page preview with url \"" << url << '"';
LOG(INFO) << "Clean up getting of web page preview with URL \"" << url << '"';
CHECK(error.is_error());
promise.set_error(std::move(error));
}
@ -830,7 +830,7 @@ tl_object_ptr<td_api::webPage> WebPagesManager::get_web_page_preview_result(int6
}
void WebPagesManager::get_web_page_instant_view(const string &url, bool force_full, Promise<WebPageId> &&promise) {
LOG(INFO) << "Trying to get web page instant view for the url \"" << url << '"';
LOG(INFO) << "Trying to get web page instant view for the URL \"" << url << '"';
if (url.empty()) {
return promise.set_value(WebPageId());
}
@ -1046,16 +1046,16 @@ WebPageId WebPagesManager::get_web_page_by_url(const string &url) const {
auto it = url_to_web_page_id_.find(url);
if (it != url_to_web_page_id_.end()) {
LOG(INFO) << "Return " << it->second << " for the url \"" << url << '"';
LOG(INFO) << "Return " << it->second << " for the URL \"" << url << '"';
return it->second;
}
LOG(INFO) << "Can't find web page identifier for the url \"" << url << '"';
LOG(INFO) << "Can't find web page identifier for the URL \"" << url << '"';
return WebPageId();
}
void WebPagesManager::get_web_page_by_url(const string &url, Promise<WebPageId> &&promise) {
LOG(INFO) << "Trying to get web page identifier for the url \"" << url << '"';
LOG(INFO) << "Trying to get web page identifier for the URL \"" << url << '"';
if (url.empty()) {
return promise.set_value(WebPageId());
}
@ -1089,7 +1089,7 @@ void WebPagesManager::load_web_page_by_url(string url, Promise<WebPageId> &&prom
void WebPagesManager::on_load_web_page_id_by_url_from_database(string url, string value, Promise<WebPageId> &&promise) {
TRY_STATUS_PROMISE(promise, G()->close_status());
LOG(INFO) << "Successfully loaded url \"" << url << "\" of size " << value.size() << " from database";
LOG(INFO) << "Successfully loaded URL \"" << url << "\" of size " << value.size() << " from database";
// G()->td_db()->get_sqlite_pmc()->erase(get_web_page_url_database_key(web_page_id), Auto());
// value.clear();

View File

@ -2242,10 +2242,11 @@ class CliClient final : public Actor {
MessageId from_message_id;
int32 offset;
string limit;
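// For "gmth" an extra message thread identifier is read before the common arguments.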
get_args(args, chat_id, args);
if (op == "gmth") {
get_args(args, thread_message_id, args);
}
get_args(args, chat_id, from_message_id, offset, limit);
get_args(args, from_message_id, offset, limit);
if (op == "gmth") {
send_request(td_api::make_object<td_api::getMessageThreadHistory>(chat_id, thread_message_id, from_message_id,
offset, as_limit(limit)));

View File

@ -60,8 +60,8 @@ class FileDb final : public FileDbInterface {
public:
class FileDbActor final : public Actor {
public:
FileDbActor(FileDbId current_pmc_id, std::shared_ptr<SqliteKeyValueSafe> file_kv_safe)
: current_pmc_id_(current_pmc_id), file_kv_safe_(std::move(file_kv_safe)) {
FileDbActor(FileDbId max_file_db_id, std::shared_ptr<SqliteKeyValueSafe> file_kv_safe)
: max_file_db_id_(max_file_db_id), file_kv_safe_(std::move(file_kv_safe)) {
}
void close(Promise<> promise) {
@ -72,20 +72,21 @@ class FileDb final : public FileDbInterface {
}
void load_file_data(const string &key, Promise<FileData> promise) {
promise.set_result(load_file_data_impl(actor_id(this), file_pmc(), key, current_pmc_id_));
promise.set_result(load_file_data_impl(actor_id(this), file_pmc(), key, max_file_db_id_));
}
void clear_file_data(FileDbId id, const string &remote_key, const string &local_key, const string &generate_key) {
void clear_file_data(FileDbId file_db_id, const string &remote_key, const string &local_key,
const string &generate_key) {
auto &pmc = file_pmc();
pmc.begin_write_transaction().ensure();
if (id > current_pmc_id_) {
pmc.set("file_id", to_string(id.get()));
current_pmc_id_ = id;
if (file_db_id > max_file_db_id_) {
pmc.set("file_id", to_string(file_db_id.get()));
max_file_db_id_ = file_db_id;
}
pmc.erase(PSTRING() << "file" << id.get());
// LOG(DEBUG) << "ERASE " << format::as_hex_dump<4>(Slice(PSLICE() << "file" << id.get()));
pmc.erase(PSTRING() << "file" << file_db_id.get());
// LOG(DEBUG) << "ERASE " << format::as_hex_dump<4>(Slice(PSLICE() << "file" << file_db_id.get()));
if (!remote_key.empty()) {
pmc.erase(remote_key);
@ -102,79 +103,79 @@ class FileDb final : public FileDbInterface {
pmc.commit_transaction().ensure();
}
void store_file_data(FileDbId id, const string &file_data, const string &remote_key, const string &local_key,
const string &generate_key) {
void store_file_data(FileDbId file_db_id, const string &file_data, const string &remote_key,
const string &local_key, const string &generate_key) {
auto &pmc = file_pmc();
pmc.begin_write_transaction().ensure();
if (id > current_pmc_id_) {
pmc.set("file_id", to_string(id.get()));
current_pmc_id_ = id;
if (file_db_id > max_file_db_id_) {
pmc.set("file_id", to_string(file_db_id.get()));
max_file_db_id_ = file_db_id;
}
pmc.set(PSTRING() << "file" << id.get(), file_data);
pmc.set(PSTRING() << "file" << file_db_id.get(), file_data);
if (!remote_key.empty()) {
pmc.set(remote_key, to_string(id.get()));
pmc.set(remote_key, to_string(file_db_id.get()));
}
if (!local_key.empty()) {
pmc.set(local_key, to_string(id.get()));
pmc.set(local_key, to_string(file_db_id.get()));
}
if (!generate_key.empty()) {
pmc.set(generate_key, to_string(id.get()));
pmc.set(generate_key, to_string(file_db_id.get()));
}
pmc.commit_transaction().ensure();
}
void store_file_data_ref(FileDbId id, FileDbId new_id) {
void store_file_data_ref(FileDbId file_db_id, FileDbId new_file_db_id) {
auto &pmc = file_pmc();
pmc.begin_write_transaction().ensure();
if (id > current_pmc_id_) {
pmc.set("file_id", to_string(id.get()));
current_pmc_id_ = id;
if (file_db_id > max_file_db_id_) {
pmc.set("file_id", to_string(file_db_id.get()));
max_file_db_id_ = file_db_id;
}
do_store_file_data_ref(id, new_id);
do_store_file_data_ref(file_db_id, new_file_db_id);
pmc.commit_transaction().ensure();
}
void optimize_refs(std::vector<FileDbId> ids, FileDbId main_id) {
LOG(INFO) << "Optimize " << ids.size() << " ids in file database to " << main_id.get();
void optimize_refs(std::vector<FileDbId> file_db_ids, FileDbId main_file_db_id) {
LOG(INFO) << "Optimize " << file_db_ids.size() << " file_db_ids in file database to " << main_file_db_id.get();
auto &pmc = file_pmc();
pmc.begin_write_transaction().ensure();
for (size_t i = 0; i + 1 < ids.size(); i++) {
do_store_file_data_ref(ids[i], main_id);
for (size_t i = 0; i + 1 < file_db_ids.size(); i++) {
do_store_file_data_ref(file_db_ids[i], main_file_db_id);
}
pmc.commit_transaction().ensure();
}
private:
FileDbId current_pmc_id_;
FileDbId max_file_db_id_;
std::shared_ptr<SqliteKeyValueSafe> file_kv_safe_;
SqliteKeyValue &file_pmc() {
return file_kv_safe_->get();
}
void do_store_file_data_ref(FileDbId id, FileDbId new_id) {
file_pmc().set(PSTRING() << "file" << id.get(), PSTRING() << "@@" << new_id.get());
void do_store_file_data_ref(FileDbId file_db_id, FileDbId new_file_db_id) {
file_pmc().set(PSTRING() << "file" << file_db_id.get(), PSTRING() << "@@" << new_file_db_id.get());
}
};
explicit FileDb(std::shared_ptr<SqliteKeyValueSafe> kv_safe, int scheduler_id = -1) {
file_kv_safe_ = std::move(kv_safe);
CHECK(file_kv_safe_);
current_pmc_id_ = FileDbId(to_integer<uint64>(file_kv_safe_->get().get("file_id")));
max_file_db_id_ = FileDbId(to_integer<uint64>(file_kv_safe_->get().get("file_id")));
file_db_actor_ =
create_actor_on_scheduler<FileDbActor>("FileDbActor", scheduler_id, current_pmc_id_, file_kv_safe_);
create_actor_on_scheduler<FileDbActor>("FileDbActor", scheduler_id, max_file_db_id_, file_kv_safe_);
}
FileDbId create_pmc_id() final {
current_pmc_id_ = FileDbId(current_pmc_id_.get() + 1);
return current_pmc_id_;
FileDbId get_next_file_db_id() final {
max_file_db_id_ = FileDbId(max_file_db_id_.get() + 1);
return max_file_db_id_;
}
void close(Promise<> promise) final {
@ -186,10 +187,10 @@ class FileDb final : public FileDbInterface {
}
Result<FileData> get_file_data_sync_impl(string key) final {
return load_file_data_impl(file_db_actor_.get(), file_kv_safe_->get(), key, current_pmc_id_);
return load_file_data_impl(file_db_actor_.get(), file_kv_safe_->get(), key, max_file_db_id_);
}
void clear_file_data(FileDbId id, const FileData &file_data) final {
void clear_file_data(FileDbId file_db_id, const FileData &file_data) final {
string remote_key;
if (file_data.remote_.type() == RemoteFileLocation::Type::Full) {
remote_key = as_key(file_data.remote_.full());
@ -202,10 +203,11 @@ class FileDb final : public FileDbInterface {
if (file_data.generate_ != nullptr) {
generate_key = as_key(*file_data.generate_);
}
send_closure(file_db_actor_, &FileDbActor::clear_file_data, id, remote_key, local_key, generate_key);
send_closure(file_db_actor_, &FileDbActor::clear_file_data, file_db_id, remote_key, local_key, generate_key);
}
void set_file_data(FileDbId id, const FileData &file_data, bool new_remote, bool new_local, bool new_generate) final {
void set_file_data(FileDbId file_db_id, const FileData &file_data, bool new_remote, bool new_local,
bool new_generate) final {
string remote_key;
if (file_data.remote_.type() == RemoteFileLocation::Type::Full && new_remote) {
remote_key = as_key(file_data.remote_.full());
@ -218,16 +220,16 @@ class FileDb final : public FileDbInterface {
if (file_data.generate_ != nullptr && new_generate) {
generate_key = as_key(*file_data.generate_);
}
// LOG(DEBUG) << "SAVE " << id.get() << " -> " << file_data << " "
// LOG(DEBUG) << "SAVE " << file_db_id.get() << " -> " << file_data << " "
// << tag("remote_key", format::as_hex_dump<4>(Slice(remote_key)))
// << tag("local_key", format::as_hex_dump<4>(Slice(local_key)))
// << tag("generate_key", format::as_hex_dump<4>(Slice(generate_key)));
send_closure(file_db_actor_, &FileDbActor::store_file_data, id, serialize(file_data), remote_key, local_key,
send_closure(file_db_actor_, &FileDbActor::store_file_data, file_db_id, serialize(file_data), remote_key, local_key,
generate_key);
}
void set_file_data_ref(FileDbId id, FileDbId new_id) final {
send_closure(file_db_actor_, &FileDbActor::store_file_data_ref, id, new_id);
void set_file_data_ref(FileDbId file_db_id, FileDbId new_file_db_id) final {
send_closure(file_db_actor_, &FileDbActor::store_file_data_ref, file_db_id, new_file_db_id);
}
SqliteKeyValue &pmc() final {
return file_kv_safe_->get();
@ -235,39 +237,39 @@ class FileDb final : public FileDbInterface {
private:
ActorOwn<FileDbActor> file_db_actor_;
FileDbId current_pmc_id_;
FileDbId max_file_db_id_;
std::shared_ptr<SqliteKeyValueSafe> file_kv_safe_;
static Result<FileData> load_file_data_impl(ActorId<FileDbActor> file_db_actor_id, SqliteKeyValue &pmc,
const string &key, FileDbId current_pmc_id) {
const string &key, FileDbId max_file_db_id) {
// LOG(DEBUG) << "Load by key " << format::as_hex_dump<4>(Slice(key));
TRY_RESULT(id, get_id(pmc, key));
TRY_RESULT(file_db_id, get_file_db_id(pmc, key));
vector<FileDbId> ids;
vector<FileDbId> file_db_ids;
string data_str;
int attempt_count = 0;
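// A value prefixed with "@@" is a reference to another file_db_id; follow the chain, collecting the visited identifiers so long chains can be collapsed afterwards.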
while (true) {
if (attempt_count > 100) {
LOG(FATAL) << "Cycle in file database? current_pmc_id=" << current_pmc_id << " key=" << key
<< " links=" << format::as_array(ids);
LOG(FATAL) << "Cycle in file database? max_file_db_id=" << max_file_db_id << " key=" << key
<< " links=" << format::as_array(file_db_ids);
}
attempt_count++;
data_str = pmc.get(PSTRING() << "file" << id.get());
data_str = pmc.get(PSTRING() << "file" << file_db_id.get());
auto data_slice = Slice(data_str);
if (data_slice.substr(0, 2) == "@@") {
ids.push_back(id);
file_db_ids.push_back(file_db_id);
id = FileDbId(to_integer<uint64>(data_slice.substr(2)));
file_db_id = FileDbId(to_integer<uint64>(data_slice.substr(2)));
} else {
break;
}
}
if (ids.size() > 1) {
send_closure(file_db_actor_id, &FileDbActor::optimize_refs, std::move(ids), id);
if (file_db_ids.size() > 1) {
send_closure(file_db_actor_id, &FileDbActor::optimize_refs, std::move(file_db_ids), file_db_id);
}
// LOG(DEBUG) << "By ID " << id.get() << " found data " << format::as_hex_dump<4>(Slice(data_str));
// LOG(DEBUG) << "By ID " << file_db_id.get() << " found data " << format::as_hex_dump<4>(Slice(data_str));
// LOG(INFO) << attempt_count;
log_event::WithVersion<TlParser> parser(data_str);
@ -282,13 +284,13 @@ class FileDb final : public FileDbInterface {
return std::move(data);
}
static Result<FileDbId> get_id(SqliteKeyValue &pmc, const string &key) TD_WARN_UNUSED_RESULT {
auto id_str = pmc.get(key);
// LOG(DEBUG) << "Found ID " << id_str << " by key " << format::as_hex_dump<4>(Slice(key));
if (id_str.empty()) {
return Status::Error("There is no such a key in database");
static Result<FileDbId> get_file_db_id(SqliteKeyValue &pmc, const string &key) TD_WARN_UNUSED_RESULT {
auto file_db_id_str = pmc.get(key);
// LOG(DEBUG) << "Found ID " << file_db_id_str << " by key " << format::as_hex_dump<4>(Slice(key));
if (file_db_id_str.empty()) {
return Status::Error("There is no such key in the database");
}
return FileDbId(to_integer<uint64>(id_str));
return FileDbId(to_integer<uint64>(file_db_id_str));
}
};

View File

@ -39,7 +39,7 @@ class FileDbInterface {
virtual ~FileDbInterface() = default;
// non-thread-safe
virtual FileDbId create_pmc_id() = 0;
virtual FileDbId get_next_file_db_id() = 0;
// thread-safe
virtual void close(Promise<> promise) = 0;
@ -75,10 +75,10 @@ class FileDbInterface {
return res;
}
virtual void clear_file_data(FileDbId id, const FileData &file_data) = 0;
virtual void set_file_data(FileDbId id, const FileData &file_data, bool new_remote, bool new_local,
virtual void clear_file_data(FileDbId file_db_id, const FileData &file_data) = 0;
virtual void set_file_data(FileDbId file_db_id, const FileData &file_data, bool new_remote, bool new_local,
bool new_generate) = 0;
virtual void set_file_data_ref(FileDbId id, FileDbId new_id) = 0;
virtual void set_file_data_ref(FileDbId file_db_id, FileDbId new_file_db_id) = 0;
// For FileStatsWorker. TODO: remove it
virtual SqliteKeyValue &pmc() = 0;

View File

@ -51,8 +51,8 @@ class FileDbId {
}
};
inline StringBuilder &operator<<(StringBuilder &sb, const FileDbId &id) {
return sb << "FileDbId{" << id.get() << "}";
inline StringBuilder &operator<<(StringBuilder &sb, const FileDbId &file_db_id) {
return sb << "FileDbId{" << file_db_id.get() << "}";
}
} // namespace td

View File

@ -245,16 +245,16 @@ Result<std::pair<NetQueryPtr, bool>> FileDownloader::start_part(Part part, int32
}
#endif
DcId dc_id = remote_.is_web() ? G()->get_webfile_dc_id() : remote_.get_dc_id();
auto id = UniqueId::next(UniqueId::Type::Default, static_cast<uint8>(QueryType::Default));
auto unique_id = UniqueId::next(UniqueId::Type::Default, static_cast<uint8>(QueryType::Default));
net_query =
remote_.is_web()
? G()->net_query_creator().create(
id,
unique_id,
telegram_api::upload_getWebFile(remote_.as_input_web_file_location(), narrow_cast<int32>(part.offset),
narrow_cast<int32>(size)),
{}, dc_id, net_query_type, NetQuery::AuthFlag::On)
: G()->net_query_creator().create(
id,
unique_id,
telegram_api::upload_getFile(flags, false /*ignored*/, false /*ignored*/,
remote_.as_input_file_location(), part.offset, narrow_cast<int32>(size)),
{}, dc_id, net_query_type, NetQuery::AuthFlag::On);

View File

@ -43,7 +43,7 @@ ActorOwn<ResourceManager> &FileLoadManager::get_download_resource_manager(bool i
return actor;
}
void FileLoadManager::download(QueryId id, const FullRemoteFileLocation &remote_location,
void FileLoadManager::download(QueryId query_id, const FullRemoteFileLocation &remote_location,
const LocalFileLocation &local, int64 size, string name,
const FileEncryptionKey &encryption_key, bool search_file, int64 offset, int64 limit,
int8 priority) {
@ -53,7 +53,7 @@ void FileLoadManager::download(QueryId id, const FullRemoteFileLocation &remote_
NodeId node_id = nodes_container_.create(Node());
Node *node = nodes_container_.get(node_id);
CHECK(node);
node->query_id_ = id;
node->query_id_ = query_id;
auto callback = make_unique<FileDownloaderCallback>(actor_shared(this, node_id));
bool is_small = size < 20 * 1024;
node->loader_ =
@ -63,11 +63,11 @@ void FileLoadManager::download(QueryId id, const FullRemoteFileLocation &remote_
auto &resource_manager = get_download_resource_manager(is_small, dc_id);
send_closure(resource_manager, &ResourceManager::register_worker,
ActorShared<FileLoaderActor>(node->loader_.get(), static_cast<uint64>(-1)), priority);
bool is_inserted = query_id_to_node_id_.emplace(id, node_id).second;
bool is_inserted = query_id_to_node_id_.emplace(query_id, node_id).second;
CHECK(is_inserted);
}
void FileLoadManager::upload(QueryId id, const LocalFileLocation &local_location,
void FileLoadManager::upload(QueryId query_id, const LocalFileLocation &local_location,
const RemoteFileLocation &remote_location, int64 expected_size,
const FileEncryptionKey &encryption_key, int8 priority, vector<int> bad_parts) {
if (stop_flag_) {
@ -76,17 +76,17 @@ void FileLoadManager::upload(QueryId id, const LocalFileLocation &local_location
NodeId node_id = nodes_container_.create(Node());
Node *node = nodes_container_.get(node_id);
CHECK(node);
node->query_id_ = id;
node->query_id_ = query_id;
auto callback = make_unique<FileUploaderCallback>(actor_shared(this, node_id));
node->loader_ = create_actor<FileUploader>("Uploader", local_location, remote_location, expected_size, encryption_key,
std::move(bad_parts), std::move(callback));
send_closure(upload_resource_manager_, &ResourceManager::register_worker,
ActorShared<FileLoaderActor>(node->loader_.get(), static_cast<uint64>(-1)), priority);
bool is_inserted = query_id_to_node_id_.emplace(id, node_id).second;
bool is_inserted = query_id_to_node_id_.emplace(query_id, node_id).second;
CHECK(is_inserted);
}
void FileLoadManager::upload_by_hash(QueryId id, const FullLocalFileLocation &local_location, int64 size,
void FileLoadManager::upload_by_hash(QueryId query_id, const FullLocalFileLocation &local_location, int64 size,
int8 priority) {
if (stop_flag_) {
return;
@ -94,20 +94,20 @@ void FileLoadManager::upload_by_hash(QueryId id, const FullLocalFileLocation &lo
NodeId node_id = nodes_container_.create(Node());
Node *node = nodes_container_.get(node_id);
CHECK(node);
node->query_id_ = id;
node->query_id_ = query_id;
auto callback = make_unique<FileHashUploaderCallback>(actor_shared(this, node_id));
node->loader_ = create_actor<FileHashUploader>("HashUploader", local_location, size, std::move(callback));
send_closure(upload_resource_manager_, &ResourceManager::register_worker,
ActorShared<FileLoaderActor>(node->loader_.get(), static_cast<uint64>(-1)), priority);
bool is_inserted = query_id_to_node_id_.emplace(id, node_id).second;
bool is_inserted = query_id_to_node_id_.emplace(query_id, node_id).second;
CHECK(is_inserted);
}
void FileLoadManager::update_priority(QueryId id, int8 priority) {
void FileLoadManager::update_priority(QueryId query_id, int8 priority) {
if (stop_flag_) {
return;
}
auto it = query_id_to_node_id_.find(id);
auto it = query_id_to_node_id_.find(query_id);
if (it == query_id_to_node_id_.end()) {
return;
}
@ -118,18 +118,18 @@ void FileLoadManager::update_priority(QueryId id, int8 priority) {
send_closure(node->loader_, &FileLoaderActor::update_priority, priority);
}
void FileLoadManager::from_bytes(QueryId id, FileType type, BufferSlice bytes, string name) {
void FileLoadManager::from_bytes(QueryId query_id, FileType type, BufferSlice bytes, string name) {
if (stop_flag_) {
return;
}
NodeId node_id = nodes_container_.create(Node());
Node *node = nodes_container_.get(node_id);
CHECK(node);
node->query_id_ = id;
node->query_id_ = query_id;
auto callback = make_unique<FileFromBytesCallback>(actor_shared(this, node_id));
node->loader_ =
create_actor<FileFromBytes>("FromBytes", type, std::move(bytes), std::move(name), std::move(callback));
bool is_inserted = query_id_to_node_id_.emplace(id, node_id).second;
bool is_inserted = query_id_to_node_id_.emplace(query_id, node_id).second;
CHECK(is_inserted);
}
@ -160,23 +160,21 @@ void FileLoadManager::check_partial_local_location(PartialLocalFileLocation part
}
}
// void upload_reload_parts(QueryId id, vector<int32> parts);
// void upload_restart(QueryId id);
void FileLoadManager::cancel(QueryId id) {
void FileLoadManager::cancel(QueryId query_id) {
if (stop_flag_) {
return;
}
auto it = query_id_to_node_id_.find(id);
auto it = query_id_to_node_id_.find(query_id);
if (it == query_id_to_node_id_.end()) {
return;
}
on_error_impl(it->second, Status::Error(-1, "Canceled"));
}
void FileLoadManager::update_local_file_location(QueryId id, const LocalFileLocation &local) {
void FileLoadManager::update_local_file_location(QueryId query_id, const LocalFileLocation &local) {
if (stop_flag_) {
return;
}
auto it = query_id_to_node_id_.find(id);
auto it = query_id_to_node_id_.find(query_id);
if (it == query_id_to_node_id_.end()) {
return;
}
@ -187,11 +185,11 @@ void FileLoadManager::update_local_file_location(QueryId id, const LocalFileLoca
send_closure(node->loader_, &FileLoaderActor::update_local_file_location, local);
}
void FileLoadManager::update_downloaded_part(QueryId id, int64 offset, int64 limit) {
void FileLoadManager::update_downloaded_part(QueryId query_id, int64 offset, int64 limit) {
if (stop_flag_) {
return;
}
auto it = query_id_to_node_id_.find(id);
auto it = query_id_to_node_id_.find(query_id);
if (it == query_id_to_node_id_.end()) {
return;
}
@ -203,7 +201,7 @@ void FileLoadManager::update_downloaded_part(QueryId id, int64 offset, int64 lim
}
void FileLoadManager::hangup() {
nodes_container_.for_each([](auto id, auto &node) { node.loader_.reset(); });
nodes_container_.for_each([](auto query_id, auto &node) { node.loader_.reset(); });
stop_flag_ = true;
loop();
}

View File

@ -34,30 +34,30 @@ class FileLoadManager final : public Actor {
using QueryId = uint64;
class Callback : public Actor {
public:
virtual void on_start_download(QueryId id) = 0;
virtual void on_partial_download(QueryId id, PartialLocalFileLocation partial_local, int64 ready_size,
virtual void on_start_download(QueryId query_id) = 0;
virtual void on_partial_download(QueryId query_id, PartialLocalFileLocation partial_local, int64 ready_size,
int64 size) = 0;
virtual void on_partial_upload(QueryId id, PartialRemoteFileLocation partial_remote, int64 ready_size) = 0;
virtual void on_hash(QueryId id, string hash) = 0;
virtual void on_upload_ok(QueryId id, FileType file_type, PartialRemoteFileLocation remtoe, int64 size) = 0;
virtual void on_upload_full_ok(QueryId id, FullRemoteFileLocation remote) = 0;
virtual void on_download_ok(QueryId id, FullLocalFileLocation local, int64 size, bool is_new) = 0;
virtual void on_error(QueryId id, Status status) = 0;
virtual void on_partial_upload(QueryId query_id, PartialRemoteFileLocation partial_remote, int64 ready_size) = 0;
virtual void on_hash(QueryId query_id, string hash) = 0;
virtual void on_upload_ok(QueryId query_id, FileType file_type, PartialRemoteFileLocation remote, int64 size) = 0;
virtual void on_upload_full_ok(QueryId query_id, FullRemoteFileLocation remote) = 0;
virtual void on_download_ok(QueryId query_id, FullLocalFileLocation local, int64 size, bool is_new) = 0;
virtual void on_error(QueryId query_id, Status status) = 0;
};
explicit FileLoadManager(ActorShared<Callback> callback, ActorShared<> parent);
void download(QueryId id, const FullRemoteFileLocation &remote_location, const LocalFileLocation &local, int64 size,
string name, const FileEncryptionKey &encryption_key, bool search_file, int64 offset, int64 limit,
int8 priority);
void upload(QueryId id, const LocalFileLocation &local_location, const RemoteFileLocation &remote_location,
void download(QueryId query_id, const FullRemoteFileLocation &remote_location, const LocalFileLocation &local,
int64 size, string name, const FileEncryptionKey &encryption_key, bool search_file, int64 offset,
int64 limit, int8 priority);
void upload(QueryId query_id, const LocalFileLocation &local_location, const RemoteFileLocation &remote_location,
int64 expected_size, const FileEncryptionKey &encryption_key, int8 priority, vector<int> bad_parts);
void upload_by_hash(QueryId id, const FullLocalFileLocation &local_location, int64 size, int8 priority);
void update_priority(QueryId id, int8 priority);
void from_bytes(QueryId id, FileType type, BufferSlice bytes, string name);
void cancel(QueryId id);
void update_local_file_location(QueryId id, const LocalFileLocation &local);
void update_downloaded_part(QueryId id, int64 offset, int64 limit);
void upload_by_hash(QueryId query_id, const FullLocalFileLocation &local_location, int64 size, int8 priority);
void update_priority(QueryId query_id, int8 priority);
void from_bytes(QueryId query_id, FileType type, BufferSlice bytes, string name);
void cancel(QueryId query_id);
void update_local_file_location(QueryId query_id, const LocalFileLocation &local);
void update_downloaded_part(QueryId query_id, int64 offset, int64 limit);
void get_content(string file_path, Promise<BufferSlice> promise);

View File

@ -211,15 +211,15 @@ Status FileLoader::do_loop() {
NetQueryPtr query;
bool is_blocking;
std::tie(query, is_blocking) = std::move(query_flag);
uint64 id = UniqueId::next();
uint64 unique_id = UniqueId::next();
if (is_blocking) {
CHECK(blocking_id_ == 0);
blocking_id_ = id;
blocking_id_ = unique_id;
}
part_map_[id] = std::make_pair(part, query->cancel_slot_.get_signal_new());
// part_map_[id] = std::make_pair(part, query.get_weak());
part_map_[unique_id] = std::make_pair(part, query->cancel_slot_.get_signal_new());
// part_map_[unique_id] = std::make_pair(part, query.get_weak());
auto callback = actor_shared(this, id);
auto callback = actor_shared(this, unique_id);
if (delay_dispatcher_.empty()) {
G()->net_query_dispatcher().dispatch_with_callback(std::move(query), std::move(callback));
} else {
@ -259,15 +259,15 @@ void FileLoader::on_result(NetQueryPtr query) {
if (stop_flag_) {
return;
}
auto id = get_link_token();
if (id == blocking_id_) {
auto unique_id = get_link_token();
if (unique_id == blocking_id_) {
blocking_id_ = 0;
}
if (UniqueId::extract_key(id) == COMMON_QUERY_KEY) {
if (UniqueId::extract_key(unique_id) == COMMON_QUERY_KEY) {
on_common_query(std::move(query));
return loop();
}
auto it = part_map_.find(id);
auto it = part_map_.find(unique_id);
if (it == part_map_.end()) {
LOG(WARNING) << "Got result for unknown part";
return;

View File

@ -142,7 +142,7 @@ struct WebRemoteFileLocation {
};
inline StringBuilder &operator<<(StringBuilder &string_builder, const WebRemoteFileLocation &location) {
return string_builder << "[url = " << location.url_ << ", access_hash = " << location.access_hash_ << "]";
return string_builder << "[URL = " << location.url_ << ", access_hash = " << location.access_hash_ << "]";
}
struct CommonRemoteFileLocation {

View File

@ -1958,7 +1958,7 @@ void FileManager::flush_to_pmc(FileNodePtr node, bool new_remote, bool new_local
bool create_flag = false;
if (node->pmc_id_.empty()) {
create_flag = true;
node->pmc_id_ = file_db_->create_pmc_id();
node->pmc_id_ = file_db_->get_next_file_db_id();
}
FileData data;
@ -2114,10 +2114,10 @@ bool FileManager::set_content(FileId file_id, BufferSlice bytes) {
node->set_download_priority(FROM_BYTES_PRIORITY);
QueryId id = queries_container_.create(Query{file_id, Query::Type::SetContent});
node->download_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::SetContent});
node->download_id_ = query_id;
node->is_download_started_ = true;
send_closure(file_load_manager_, &FileLoadManager::from_bytes, id, node->remote_.full.value().file_type_,
send_closure(file_load_manager_, &FileLoadManager::from_bytes, query_id, node->remote_.full.value().file_type_,
std::move(bytes), node->suggested_path());
return true;
}
@ -2423,10 +2423,10 @@ void FileManager::run_download(FileNodePtr node, bool force_update_priority) {
if (node->need_reload_photo_ && file_view.may_reload_photo()) {
LOG(INFO) << "Reload photo from file " << node->main_file_id_;
QueryId id = queries_container_.create(Query{file_id, Query::Type::DownloadReloadDialog});
node->download_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::DownloadReloadDialog});
node->download_id_ = query_id;
context_->reload_photo(file_view.remote_location().get_source(),
PromiseCreator::lambda([id, actor_id = actor_id(this), file_id](Result<Unit> res) {
PromiseCreator::lambda([query_id, actor_id = actor_id(this), file_id](Result<Unit> res) {
Status error;
if (res.is_ok()) {
error = Status::Error("FILE_DOWNLOAD_ID_INVALID");
@ -2435,7 +2435,7 @@ void FileManager::run_download(FileNodePtr node, bool force_update_priority) {
}
VLOG(file_references)
<< "Got result from reload photo for file " << file_id << ": " << error;
send_closure(actor_id, &FileManager::on_error, id, std::move(error));
send_closure(actor_id, &FileManager::on_error, query_id, std::move(error));
}));
node->need_reload_photo_ = false;
return;
@ -2444,16 +2444,16 @@ void FileManager::run_download(FileNodePtr node, bool force_update_priority) {
// If file reference is needed
if (!file_view.has_active_download_remote_location()) {
VLOG(file_references) << "Do not have valid file_reference for file " << file_id;
QueryId id = queries_container_.create(Query{file_id, Query::Type::DownloadWaitFileReference});
node->download_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::DownloadWaitFileReference});
node->download_id_ = query_id;
if (node->download_was_update_file_reference_) {
on_error(id, Status::Error("Can't download file: have no valid file reference"));
on_error(query_id, Status::Error("Can't download file: have no valid file reference"));
return;
}
node->download_was_update_file_reference_ = true;
context_->repair_file_reference(
file_id, PromiseCreator::lambda([id, actor_id = actor_id(this), file_id](Result<Unit> res) {
file_id, PromiseCreator::lambda([query_id, actor_id = actor_id(this), file_id](Result<Unit> res) {
Status error;
if (res.is_ok()) {
error = Status::Error("FILE_DOWNLOAD_RESTART_WITH_FILE_REFERENCE");
@ -2461,13 +2461,13 @@ void FileManager::run_download(FileNodePtr node, bool force_update_priority) {
error = res.move_as_error();
}
VLOG(file_references) << "Got result from FileSourceManager for file " << file_id << ": " << error;
send_closure(actor_id, &FileManager::on_error, id, std::move(error));
send_closure(actor_id, &FileManager::on_error, query_id, std::move(error));
}));
return;
}
QueryId id = queries_container_.create(Query{file_id, Query::Type::Download});
node->download_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::Download});
node->download_id_ = query_id;
node->is_download_started_ = false;
LOG(INFO) << "Run download of file " << file_id << " of size " << node->size_ << " from "
<< node->remote_.full.value() << " with suggested name " << node->suggested_path() << " and encryption key "
@ -2480,7 +2480,7 @@ void FileManager::run_download(FileNodePtr node, bool force_update_priority) {
download_limit += download_offset;
download_offset = 0;
}
send_closure(file_load_manager_, &FileLoadManager::download, id, node->remote_.full.value(), node->local_,
send_closure(file_load_manager_, &FileLoadManager::download, query_id, node->remote_.full.value(), node->local_,
node->size_, node->suggested_path(), node->encryption_key_, node->can_search_locally_, download_offset,
download_limit, priority);
}
@ -2777,20 +2777,20 @@ void FileManager::delete_file_reference(FileId file_id, Slice file_reference) {
try_flush_node_pmc(node, "delete_file_reference");
}
void FileManager::external_file_generate_write_part(int64 id, int64 offset, string data, Promise<> promise) {
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_write_part, id, offset,
std::move(data), std::move(promise));
void FileManager::external_file_generate_write_part(int64 generation_id, int64 offset, string data, Promise<> promise) {
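// The public API exposes the generation identifier as int64, while FileGenerateManager works with uint64, hence the explicit cast.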
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_write_part,
static_cast<uint64>(generation_id), offset, std::move(data), std::move(promise));
}
void FileManager::external_file_generate_progress(int64 id, int64 expected_size, int64 local_prefix_size,
void FileManager::external_file_generate_progress(int64 generation_id, int64 expected_size, int64 local_prefix_size,
Promise<> promise) {
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_progress, id, expected_size,
local_prefix_size, std::move(promise));
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_progress,
static_cast<uint64>(generation_id), expected_size, local_prefix_size, std::move(promise));
}
void FileManager::external_file_generate_finish(int64 id, Status status, Promise<> promise) {
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_finish, id, std::move(status),
std::move(promise));
void FileManager::external_file_generate_finish(int64 generation_id, Status status, Promise<> promise) {
send_closure(file_generate_manager_, &FileGenerateManager::external_file_generate_finish,
static_cast<uint64>(generation_id), std::move(status), std::move(promise));
}
void FileManager::run_generate(FileNodePtr node) {
@ -2847,20 +2847,20 @@ void FileManager::run_generate(FileNodePtr node) {
return;
}
QueryId id = queries_container_.create(Query{file_id, Query::Type::Generate});
node->generate_id_ = id;
send_closure(file_generate_manager_, &FileGenerateManager::generate_file, id, *node->generate_, node->local_,
node->suggested_path(), [file_manager = this, id] {
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::Generate});
node->generate_id_ = query_id;
send_closure(
file_generate_manager_, &FileGenerateManager::generate_file, query_id, *node->generate_, node->local_,
node->suggested_path(), [file_manager = this, query_id] {
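// Local callback that forwards generation progress, success, and errors back to the FileManager actor under the query identifier.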
class Callback final : public FileGenerateCallback {
ActorId<FileManager> actor_;
uint64 query_id_;
public:
Callback(ActorId<FileManager> actor, QueryId id) : actor_(std::move(actor)), query_id_(id) {
Callback(ActorId<FileManager> actor, QueryId query_id) : actor_(std::move(actor)), query_id_(query_id) {
}
void on_partial_generate(PartialLocalFileLocation partial_local, int64 expected_size) final {
send_closure(actor_, &FileManager::on_partial_generate, query_id_, std::move(partial_local),
expected_size);
send_closure(actor_, &FileManager::on_partial_generate, query_id_, std::move(partial_local), expected_size);
}
void on_ok(FullLocalFileLocation local) final {
send_closure(actor_, &FileManager::on_generate_ok, query_id_, std::move(local));
@ -2869,7 +2869,7 @@ void FileManager::run_generate(FileNodePtr node) {
send_closure(actor_, &FileManager::on_error, query_id_, std::move(error));
}
};
return make_unique<Callback>(file_manager->actor_id(file_manager), id);
return make_unique<Callback>(file_manager->actor_id(file_manager), query_id);
}());
LOG(INFO) << "File " << file_id << " generate request has sent to FileGenerateManager";
@ -2949,27 +2949,28 @@ void FileManager::run_upload(FileNodePtr node, vector<int> bad_parts) {
CHECK(node->upload_id_ == 0);
if (file_view.has_alive_remote_location() && !file_view.has_active_upload_remote_location() &&
can_reuse_remote_file(file_view.get_type())) {
QueryId id = queries_container_.create(Query{file_id, Query::Type::UploadWaitFileReference});
node->upload_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::UploadWaitFileReference});
node->upload_id_ = query_id;
if (node->upload_was_update_file_reference_) {
on_error(id, Status::Error("Can't upload file: have no valid file reference"));
on_error(query_id, Status::Error("Can't upload file: have no valid file reference"));
return;
}
node->upload_was_update_file_reference_ = true;
context_->repair_file_reference(
node->main_file_id_, PromiseCreator::lambda([id, actor_id = actor_id(this)](Result<Unit> res) {
send_closure(actor_id, &FileManager::on_error, id, Status::Error("FILE_UPLOAD_RESTART_WITH_FILE_REFERENCE"));
context_->repair_file_reference(node->main_file_id_,
PromiseCreator::lambda([query_id, actor_id = actor_id(this)](Result<Unit> res) {
send_closure(actor_id, &FileManager::on_error, query_id,
Status::Error("FILE_UPLOAD_RESTART_WITH_FILE_REFERENCE"));
}));
return;
}
if (!node->remote_.partial && node->get_by_hash_) {
LOG(INFO) << "Get file " << node->main_file_id_ << " by hash";
QueryId id = queries_container_.create(Query{file_id, Query::Type::UploadByHash});
node->upload_id_ = id;
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::UploadByHash});
node->upload_id_ = query_id;
send_closure(file_load_manager_, &FileLoadManager::upload_by_hash, id, node->local_.full(), node->size_,
send_closure(file_load_manager_, &FileLoadManager::upload_by_hash, query_id, node->local_.full(), node->size_,
narrow_cast<int8>(-priority));
return;
}
@ -2982,9 +2983,9 @@ void FileManager::run_upload(FileNodePtr node, vector<int> bad_parts) {
expected_size = 10 << 20;
}
QueryId id = queries_container_.create(Query{file_id, Query::Type::Upload});
node->upload_id_ = id;
send_closure(file_load_manager_, &FileLoadManager::upload, id, node->local_, node->remote_.partial_or_empty(),
QueryId query_id = queries_container_.create(Query{file_id, Query::Type::Upload});
node->upload_id_ = query_id;
send_closure(file_load_manager_, &FileLoadManager::upload, query_id, node->local_, node->remote_.partial_or_empty(),
expected_size, node->encryption_key_, new_priority, std::move(bad_parts));
LOG(INFO) << "File " << file_id << " upload request has sent to FileLoadManager";
@ -4079,9 +4080,9 @@ void FileManager::hangup() {
file_generate_manager_.reset();
file_load_manager_.reset();
while (!queries_container_.empty()) {
auto ids = queries_container_.ids();
for (auto id : ids) {
on_error(id, Global::request_aborted_error());
auto query_ids = queries_container_.ids();
for (auto query_id : query_ids) {
on_error(query_id, Global::request_aborted_error());
}
}
is_closed_ = true;

View File

@ -472,9 +472,10 @@ class FileManager final : public FileLoadManager::Callback {
void delete_file(FileId file_id, Promise<Unit> promise, const char *source);
void external_file_generate_write_part(int64 id, int64 offset, string data, Promise<> promise);
void external_file_generate_progress(int64 id, int64 expected_size, int64 local_prefix_size, Promise<> promise);
void external_file_generate_finish(int64 id, Status status, Promise<> promise);
void external_file_generate_write_part(int64 generation_id, int64 offset, string data, Promise<> promise);
void external_file_generate_progress(int64 generation_id, int64 expected_size, int64 local_prefix_size,
Promise<> promise);
void external_file_generate_finish(int64 generation_id, Status status, Promise<> promise);
Result<FileId> from_persistent_id(CSlice persistent_id, FileType file_type) TD_WARN_UNUSED_RESULT;
FileView get_file_view(FileId file_id) const;
@ -661,7 +662,6 @@ class FileManager final : public FileLoadManager::Callback {
void on_force_reupload_success(FileId file_id);
// void release_file_node(FileNodeId id);
void do_cancel_download(FileNodePtr node);
void do_cancel_upload(FileNodePtr node);
void do_cancel_generate(FileNodePtr node);

View File

@ -46,17 +46,17 @@ int32 PartsManager::set_streaming_offset(int64 offset, int64 limit) {
return finish();
}
auto part_i = offset / part_size_;
if (use_part_count_limit_ && part_i >= MAX_PART_COUNT_PREMIUM) {
auto part_id = offset / part_size_;
if (use_part_count_limit_ && part_id >= MAX_PART_COUNT_PREMIUM) {
streaming_offset_ = 0;
LOG(ERROR) << "Ignore streaming_offset " << offset << " in part " << part_i;
LOG(ERROR) << "Ignore streaming_offset " << offset << " in part " << part_id;
return finish();
}
streaming_offset_ = offset;
first_streaming_empty_part_ = narrow_cast<int>(part_i);
first_streaming_not_ready_part_ = narrow_cast<int>(part_i);
first_streaming_empty_part_ = narrow_cast<int>(part_id);
first_streaming_not_ready_part_ = narrow_cast<int>(part_id);
if (part_count_ < first_streaming_empty_part_) {
part_count_ = first_streaming_empty_part_;
part_status_.resize(part_count_, PartStatus::Empty);
@ -75,9 +75,9 @@ void PartsManager::set_streaming_limit(int64 limit) {
if (streaming_limit_ == 0) {
return;
}
for (int part_i = 0; part_i < part_count_; part_i++) {
if (is_part_in_streaming_limit(part_i) && part_status_[part_i] == PartStatus::Ready) {
streaming_ready_size_ += get_part(part_i).size;
for (int part_id = 0; part_id < part_count_; part_id++) {
if (is_part_in_streaming_limit(part_id) && part_status_[part_id] == PartStatus::Ready) {
streaming_ready_size_ += get_part(part_id).size;
}
}
}
@ -231,10 +231,10 @@ string PartsManager::get_bitmask() {
return bitmask_.encode(prefix_count);
}
bool PartsManager::is_part_in_streaming_limit(int part_i) const {
CHECK(part_i < part_count_);
auto offset_begin = static_cast<int64>(part_i) * static_cast<int64>(get_part_size());
auto offset_end = offset_begin + static_cast<int64>(get_part(part_i).size);
bool PartsManager::is_part_in_streaming_limit(int part_id) const {
CHECK(part_id < part_count_);
auto offset_begin = static_cast<int64>(part_id) * static_cast<int64>(get_part_size());
auto offset_end = offset_begin + static_cast<int64>(get_part(part_id).size);
if (offset_begin >= get_expected_size()) {
return false;
@ -265,25 +265,25 @@ bool PartsManager::is_streaming_limit_reached() {
return false;
}
update_first_not_ready_part();
auto part_i = first_streaming_not_ready_part_;
auto part_id = first_streaming_not_ready_part_;
// wrap
if (!unknown_size_flag_ && part_i == part_count_) {
part_i = first_not_ready_part_;
if (!unknown_size_flag_ && part_id == part_count_) {
part_id = first_not_ready_part_;
}
if (part_i == part_count_) {
if (part_id == part_count_) {
return false;
}
return !is_part_in_streaming_limit(part_i);
return !is_part_in_streaming_limit(part_id);
}
Result<Part> PartsManager::start_part() {
update_first_empty_part();
auto part_i = first_streaming_empty_part_;
if (known_prefix_flag_ && part_i >= static_cast<int>(known_prefix_size_ / part_size_)) {
auto part_id = first_streaming_empty_part_;
if (known_prefix_flag_ && part_id >= static_cast<int>(known_prefix_size_ / part_size_)) {
return Status::Error(-1, "Wait for prefix to be known");
}
if (part_i == part_count_) {
if (part_id == part_count_) {
if (unknown_size_flag_) {
part_count_++;
if (part_count_ > MAX_PART_COUNT_PREMIUM + (use_part_count_limit_ ? 0 : 64)) {
@ -296,19 +296,19 @@ Result<Part> PartsManager::start_part() {
part_status_.push_back(PartStatus::Empty);
} else {
if (first_empty_part_ < part_count_) {
part_i = first_empty_part_;
part_id = first_empty_part_;
} else {
return get_empty_part();
}
}
}
if (!is_part_in_streaming_limit(part_i)) {
if (!is_part_in_streaming_limit(part_id)) {
return get_empty_part();
}
CHECK(part_status_[part_i] == PartStatus::Empty);
on_part_start(part_i);
return get_part(part_i);
CHECK(part_status_[part_id] == PartStatus::Empty);
on_part_start(part_id);
return get_part(part_id);
}
Status PartsManager::set_known_prefix(size_t size, bool is_ready) {
@ -341,22 +341,23 @@ Status PartsManager::set_known_prefix(size_t size, bool is_ready) {
return Status::OK();
}
Status PartsManager::on_part_ok(int32 id, size_t part_size, size_t actual_size) {
CHECK(part_status_[id] == PartStatus::Pending);
Status PartsManager::on_part_ok(int part_id, size_t part_size, size_t actual_size) {
CHECK(part_status_[part_id] == PartStatus::Pending);
pending_count_--;
part_status_[id] = PartStatus::Ready;
part_status_[part_id] = PartStatus::Ready;
if (actual_size != 0) {
bitmask_.set(id);
bitmask_.set(part_id);
}
ready_size_ += narrow_cast<int64>(actual_size);
if (streaming_limit_ > 0 && is_part_in_streaming_limit(id)) {
if (streaming_limit_ > 0 && is_part_in_streaming_limit(part_id)) {
streaming_ready_size_ += narrow_cast<int64>(actual_size);
}
VLOG(file_loader) << "Transferred part " << id << " of size " << part_size << ", total ready size = " << ready_size_;
VLOG(file_loader) << "Transferred part " << part_id << " of size " << part_size
<< ", total ready size = " << ready_size_;
int64 offset = narrow_cast<int64>(part_size_) * id;
int64 offset = narrow_cast<int64>(part_size_) * part_id;
int64 end_offset = offset + narrow_cast<int64>(actual_size);
if (unknown_size_flag_) {
CHECK(part_size == part_size_);
@ -386,20 +387,20 @@ Status PartsManager::on_part_ok(int32 id, size_t part_size, size_t actual_size)
return Status::OK();
}
void PartsManager::on_part_failed(int32 id) {
CHECK(part_status_[id] == PartStatus::Pending);
void PartsManager::on_part_failed(int32 part_id) {
CHECK(part_status_[part_id] == PartStatus::Pending);
pending_count_--;
part_status_[id] = PartStatus::Empty;
if (id < first_empty_part_) {
first_empty_part_ = id;
part_status_[part_id] = PartStatus::Empty;
if (part_id < first_empty_part_) {
first_empty_part_ = part_id;
}
if (streaming_offset_ == 0) {
first_streaming_empty_part_ = id;
first_streaming_empty_part_ = part_id;
return;
}
auto part_i = narrow_cast<int>(streaming_offset_ / part_size_);
if (id >= part_i && id < first_streaming_empty_part_) {
first_streaming_empty_part_ = id;
auto offset_part_id = narrow_cast<int>(streaming_offset_ / part_size_);
if (part_id >= offset_part_id && part_id < first_streaming_empty_part_) {
first_streaming_empty_part_ = part_id;
}
}
@ -442,9 +443,9 @@ int64 PartsManager::get_estimated_extra() const {
//TODO: delete this block if CHECK won't fail
int64 sub = 0;
for (int part_i = 0; part_i < part_count_; part_i++) {
if (is_part_in_streaming_limit(part_i) && part_status_[part_i] == PartStatus::Ready) {
sub += get_part(part_i).size;
for (int part_id = 0; part_id < part_count_; part_id++) {
if (is_part_in_streaming_limit(part_id) && part_status_[part_id] == PartStatus::Ready) {
sub += get_part(part_id).size;
}
}
CHECK(sub == streaming_ready_size_);
@ -536,25 +537,25 @@ int64 PartsManager::get_unchecked_ready_prefix_size() {
return res;
}
Part PartsManager::get_part(int id) const {
Part PartsManager::get_part(int part_id) const {
auto size = narrow_cast<int64>(part_size_);
auto offset = size * id;
auto offset = size * part_id;
auto total_size = unknown_size_flag_ ? max_size_ : get_size();
if (total_size < offset) {
size = 0;
} else {
size = min(size, total_size - offset);
}
return Part{id, offset, static_cast<size_t>(size)};
return Part{part_id, offset, static_cast<size_t>(size)};
}
Part PartsManager::get_empty_part() {
return Part{-1, 0, 0};
}
void PartsManager::on_part_start(int32 id) {
CHECK(part_status_[id] == PartStatus::Empty);
part_status_[id] = PartStatus::Pending;
void PartsManager::on_part_start(int32 part_id) {
CHECK(part_status_[part_id] == PartStatus::Empty);
part_status_[part_id] = PartStatus::Pending;
pending_count_++;
}
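
The renamed part_id code above maps a byte offset to a part index (part_id = offset / part_size) and checks whether a part overlaps the requested streaming window. The following standalone sketch (plain C++, not TDLib code; all names are illustrative, and the wrap-around of the window at the end of the file handled by the real PartsManager is omitted) shows the same overlap check:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative only: a part [begin, end) counts as inside the streaming window
// when it intersects [streaming_offset, streaming_offset + streaming_limit).
bool part_in_window(int part_id, std::int64_t part_size, std::int64_t file_size,
                    std::int64_t streaming_offset, std::int64_t streaming_limit) {
  std::int64_t begin = part_id * part_size;
  std::int64_t end = std::min(begin + part_size, file_size);
  if (begin >= file_size) {
    return false;
  }
  if (streaming_limit == 0) {
    return true;  // no window restriction
  }
  std::int64_t window_end = streaming_offset + streaming_limit;
  return begin < window_end && end > streaming_offset;
}

int main() {
  // A 10 MiB file split into 512 KiB parts, streaming 1 MiB starting at 3 MiB:
  // only parts 6 and 7 fall inside the window.
  const std::int64_t part_size = 512 << 10;
  const std::int64_t file_size = 10 << 20;
  for (int part_id = 0; part_id < 20; part_id++) {
    if (part_in_window(part_id, part_size, file_size, 3 << 20, 1 << 20)) {
      std::cout << "part " << part_id << " is inside the streaming window\n";
    }
  }
}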

View File

@ -30,8 +30,8 @@ class PartsManager {
// returns empty part if nothing to return
Result<Part> start_part() TD_WARN_UNUSED_RESULT;
Status on_part_ok(int32 id, size_t part_size, size_t actual_size) TD_WARN_UNUSED_RESULT;
void on_part_failed(int32 id);
Status on_part_ok(int part_id, size_t part_size, size_t actual_size) TD_WARN_UNUSED_RESULT;
void on_part_failed(int part_id);
Status set_known_prefix(size_t size, bool is_ready);
void set_need_check();
void set_checked_prefix_size(int64 size);
@ -96,13 +96,13 @@ class PartsManager {
static Part get_empty_part();
Part get_part(int id) const;
void on_part_start(int32 id);
Part get_part(int part_id) const;
void on_part_start(int part_id);
void update_first_empty_part();
void update_first_not_ready_part();
bool is_streaming_limit_reached();
bool is_part_in_streaming_limit(int part_i) const;
bool is_part_in_streaming_limit(int part_id) const;
};
} // namespace td

View File

@ -219,7 +219,7 @@ inline StringBuilder &operator<<(StringBuilder &sb, const EncryptedInputFile &fi
}
// LogEvents
// TODO: Qts and SeqNoState could be just Logevents that are updated during regenerate
// TODO: QTS and SeqNoState could be just Logevents that are updated during regenerate
class InboundSecretMessage final : public SecretChatLogEventBase<InboundSecretMessage> {
public:
static constexpr Type type = SecretChatEvent::Type::InboundSecretMessage;

View File

@ -172,17 +172,17 @@ Status NetQueryDispatcher::wait_dc_init(DcId dc_id, bool force) {
int32 download_session_count = is_premium ? 8 : 2;
int32 download_small_session_count = is_premium ? 8 : 2;
dc.main_session_ = create_actor<SessionMultiProxy>(PSLICE() << "SessionMultiProxy:" << raw_dc_id << ":main",
session_count, auth_data, raw_dc_id == main_dc_id_, use_pfs,
false, false, is_cdn, need_destroy_key);
session_count, auth_data, true, raw_dc_id == main_dc_id_,
use_pfs, false, false, is_cdn, need_destroy_key);
dc.upload_session_ = create_actor_on_scheduler<SessionMultiProxy>(
PSLICE() << "SessionMultiProxy:" << raw_dc_id << ":upload", slow_net_scheduler_id, upload_session_count,
auth_data, false, use_pfs, false, true, is_cdn, need_destroy_key);
auth_data, false, false, use_pfs, false, true, is_cdn, need_destroy_key);
dc.download_session_ = create_actor_on_scheduler<SessionMultiProxy>(
PSLICE() << "SessionMultiProxy:" << raw_dc_id << ":download", slow_net_scheduler_id, download_session_count,
auth_data, false, use_pfs, true, true, is_cdn, need_destroy_key);
auth_data, false, false, use_pfs, true, true, is_cdn, need_destroy_key);
dc.download_small_session_ = create_actor_on_scheduler<SessionMultiProxy>(
PSLICE() << "SessionMultiProxy:" << raw_dc_id << ":download_small", slow_net_scheduler_id,
download_small_session_count, auth_data, false, use_pfs, true, true, is_cdn, need_destroy_key);
download_small_session_count, auth_data, false, false, use_pfs, true, true, is_cdn, need_destroy_key);
dc.is_inited_ = true;
if (dc_id.is_internal()) {
send_closure_later(dc_auth_manager_, &DcAuthManager::add_dc, std::move(auth_data));

View File

@ -232,9 +232,14 @@ bool Session::PriorityQueue::empty() const {
}
Session::Session(unique_ptr<Callback> callback, std::shared_ptr<AuthDataShared> shared_auth_data, int32 raw_dc_id,
int32 dc_id, bool is_main, bool use_pfs, bool is_cdn, bool need_destroy,
int32 dc_id, bool is_primary, bool is_main, bool use_pfs, bool is_cdn, bool need_destroy,
const mtproto::AuthKey &tmp_auth_key, const vector<mtproto::ServerSalt> &server_salts)
: raw_dc_id_(raw_dc_id), dc_id_(dc_id), is_main_(is_main), is_cdn_(is_cdn), need_destroy_(need_destroy) {
: raw_dc_id_(raw_dc_id)
, dc_id_(dc_id)
, is_primary_(is_primary)
, is_main_(is_main)
, is_cdn_(is_cdn)
, need_destroy_(need_destroy) {
VLOG(dc) << "Start connection " << tag("need_destroy", need_destroy_);
if (need_destroy_) {
use_pfs = false;
@ -333,30 +338,33 @@ void Session::on_network(bool network_flag, uint32 network_generation) {
}
void Session::on_online(bool online_flag) {
LOG(DEBUG) << "Set online flag to " << online_flag;
online_flag_ = online_flag;
connection_online_update(true);
loop();
}
void Session::on_logging_out(bool logging_out_flag) {
LOG(DEBUG) << "Set logging out flag to " << logging_out_flag;
logging_out_flag_ = logging_out_flag;
connection_online_update(true);
loop();
}
void Session::connection_online_update(bool force) {
bool new_connection_online_flag = (online_flag_ || logging_out_flag_) &&
(has_queries() || last_activity_timestamp_ + 10 > Time::now_cached() || is_main_);
bool new_connection_online_flag =
(online_flag_ || logging_out_flag_) &&
(has_queries() || last_activity_timestamp_ + 10 > Time::now_cached() || is_primary_);
if (connection_online_flag_ == new_connection_online_flag && !force) {
return;
}
connection_online_flag_ = new_connection_online_flag;
VLOG(dc) << "Set connection_online " << connection_online_flag_;
if (main_connection_.connection_) {
main_connection_.connection_->set_online(connection_online_flag_, is_main_);
main_connection_.connection_->set_online(connection_online_flag_, is_primary_);
}
if (long_poll_connection_.connection_) {
long_poll_connection_.connection_->set_online(connection_online_flag_, is_main_);
long_poll_connection_.connection_->set_online(connection_online_flag_, is_primary_);
}
}
@ -685,9 +693,9 @@ void Session::on_closed(Status status) {
current_info_->state_ = ConnectionInfo::State::Empty;
}
void Session::on_session_created(uint64 unique_id, uint64 first_id) {
void Session::on_session_created(uint64 unique_id, uint64 first_message_id) {
// TODO: use unique_id
LOG(INFO) << "New session " << unique_id << " created with first message_id " << first_id;
LOG(INFO) << "New session " << unique_id << " created with first message_id " << first_message_id;
if (!use_pfs_ && !auth_data_.use_pfs()) {
last_success_timestamp_ = Time::now();
}
@ -701,7 +709,7 @@ void Session::on_session_created(uint64 unique_id, uint64 first_id) {
for (auto it = sent_queries_.begin(); it != sent_queries_.end();) {
Query *query_ptr = &it->second;
if (query_ptr->container_id < first_id) {
if (query_ptr->container_message_id < first_message_id) {
// container vector leak otherwise
cleanup_container(it->first, &it->second);
mark_as_known(it->first, &it->second);
@ -729,30 +737,30 @@ void Session::on_session_failed(Status status) {
callback_->on_failed();
}
void Session::on_container_sent(uint64 container_id, vector<uint64> msg_ids) {
CHECK(container_id != 0);
void Session::on_container_sent(uint64 container_message_id, vector<uint64> message_ids) {
CHECK(container_message_id != 0);
td::remove_if(msg_ids, [&](uint64 msg_id) {
auto it = sent_queries_.find(msg_id);
td::remove_if(message_ids, [&](uint64 message_id) {
auto it = sent_queries_.find(message_id);
if (it == sent_queries_.end()) {
return true; // remove
}
it->second.container_id = container_id;
it->second.container_message_id = container_message_id;
return false;
});
if (msg_ids.empty()) {
if (message_ids.empty()) {
return;
}
auto size = msg_ids.size();
sent_containers_.emplace(container_id, ContainerInfo{size, std::move(msg_ids)});
auto size = message_ids.size();
sent_containers_.emplace(container_message_id, ContainerInfo{size, std::move(message_ids)});
}
void Session::on_message_ack(uint64 id) {
on_message_ack_impl(id, 1);
void Session::on_message_ack(uint64 message_id) {
on_message_ack_impl(message_id, 1);
}
void Session::on_message_ack_impl(uint64 id, int32 type) {
auto cit = sent_containers_.find(id);
void Session::on_message_ack_impl(uint64 container_message_id, int32 type) {
auto cit = sent_containers_.find(container_message_id);
if (cit != sent_containers_.end()) {
auto container_info = std::move(cit->second);
sent_containers_.erase(cit);
@ -763,15 +771,15 @@ void Session::on_message_ack_impl(uint64 id, int32 type) {
return;
}
on_message_ack_impl_inner(id, type, false);
on_message_ack_impl_inner(container_message_id, type, false);
}
void Session::on_message_ack_impl_inner(uint64 id, int32 type, bool in_container) {
auto it = sent_queries_.find(id);
void Session::on_message_ack_impl_inner(uint64 message_id, int32 type, bool in_container) {
auto it = sent_queries_.find(message_id);
if (it == sent_queries_.end()) {
return;
}
VLOG(net_query) << "Ack " << tag("msg_id", id) << it->second.query;
VLOG(net_query) << "Ack " << tag("message_id", message_id) << it->second.query;
it->second.ack = true;
{
auto lock = it->second.query->lock();
@ -779,17 +787,17 @@ void Session::on_message_ack_impl_inner(uint64 id, int32 type, bool in_container
}
it->second.query->quick_ack_promise_.set_value(Unit());
if (!in_container) {
cleanup_container(id, &it->second);
cleanup_container(message_id, &it->second);
}
mark_as_known(it->first, &it->second);
}
void Session::dec_container(uint64 message_id, Query *query) {
if (query->container_id == message_id) {
void Session::dec_container(uint64 container_message_id, Query *query) {
if (query->container_message_id == container_message_id) {
// message was sent without any container
return;
}
auto it = sent_containers_.find(query->container_id);
auto it = sent_containers_.find(query->container_message_id);
if (it == sent_containers_.end()) {
return;
}
@ -800,18 +808,18 @@ void Session::dec_container(uint64 message_id, Query *query) {
}
}
void Session::cleanup_container(uint64 message_id, Query *query) {
if (query->container_id == message_id) {
void Session::cleanup_container(uint64 container_message_id, Query *query) {
if (query->container_message_id == container_message_id) {
// message was sent without any container
return;
}
// we can forget container now, since we have an answer for its part.
// TODO: we can do it only for one element per container
sent_containers_.erase(query->container_id);
sent_containers_.erase(query->container_message_id);
}
void Session::mark_as_known(uint64 id, Query *query) {
void Session::mark_as_known(uint64 message_id, Query *query) {
{
auto lock = query->query->lock();
query->query->get_data_unsafe().unknown_state_ = false;
@ -819,15 +827,15 @@ void Session::mark_as_known(uint64 id, Query *query) {
if (!query->unknown) {
return;
}
VLOG(net_query) << "Mark as known " << tag("msg_id", id) << query->query;
VLOG(net_query) << "Mark as known " << tag("message_id", message_id) << query->query;
query->unknown = false;
unknown_queries_.erase(id);
unknown_queries_.erase(message_id);
if (unknown_queries_.empty()) {
flush_pending_invoke_after_queries();
}
}
void Session::mark_as_unknown(uint64 id, Query *query) {
void Session::mark_as_unknown(uint64 message_id, Query *query) {
{
auto lock = query->query->lock();
query->query->get_data_unsafe().unknown_state_ = true;
@ -835,10 +843,10 @@ void Session::mark_as_unknown(uint64 id, Query *query) {
if (query->unknown) {
return;
}
VLOG(net_query) << "Mark as unknown " << tag("msg_id", id) << query->query;
VLOG(net_query) << "Mark as unknown " << tag("message_id", message_id) << query->query;
query->unknown = true;
CHECK(id != 0);
unknown_queries_.insert(id);
CHECK(message_id != 0);
unknown_queries_.insert(message_id);
}
Status Session::on_update(BufferSlice packet) {
@ -854,16 +862,16 @@ Status Session::on_update(BufferSlice packet) {
return Status::OK();
}
Status Session::on_message_result_ok(uint64 id, BufferSlice packet, size_t original_size) {
Status Session::on_message_result_ok(uint64 message_id, BufferSlice packet, size_t original_size) {
last_success_timestamp_ = Time::now();
TlParser parser(packet.as_slice());
int32 ID = parser.fetch_int();
auto it = sent_queries_.find(id);
auto it = sent_queries_.find(message_id);
if (it == sent_queries_.end()) {
LOG(DEBUG) << "Drop result to " << tag("request_id", format::as_hex(id)) << tag("original_size", original_size)
<< tag("tl", format::as_hex(ID));
LOG(DEBUG) << "Drop result to " << tag("message_id", format::as_hex(message_id))
<< tag("original_size", original_size) << tag("tl", format::as_hex(ID));
if (original_size > 16 * 1024) {
dropped_size_ += original_size;
@ -893,8 +901,8 @@ Status Session::on_message_result_ok(uint64 id, BufferSlice packet, size_t origi
}
}
cleanup_container(id, query_ptr);
mark_as_known(id, query_ptr);
cleanup_container(message_id, query_ptr);
mark_as_known(message_id, query_ptr);
query_ptr->query->on_net_read(original_size);
query_ptr->query->set_ok(std::move(packet));
query_ptr->query->set_message_id(0);
@ -905,7 +913,7 @@ Status Session::on_message_result_ok(uint64 id, BufferSlice packet, size_t origi
return Status::OK();
}
void Session::on_message_result_error(uint64 id, int error_code, string message) {
void Session::on_message_result_error(uint64 message_id, int error_code, string message) {
if (!check_utf8(message)) {
LOG(ERROR) << "Receive invalid error message \"" << message << '"';
message = "INVALID_UTF8_ERROR_MESSAGE";
@ -962,7 +970,7 @@ void Session::on_message_result_error(uint64 id, int error_code, string message)
error_code = 500;
}
if (id == 0) {
if (message_id == 0) {
LOG(ERROR) << "Received an error update";
return;
}
@ -974,7 +982,7 @@ void Session::on_message_result_error(uint64 id, int error_code, string message)
} else {
LOG(DEBUG) << "Receive error " << error_code << " : " << message;
}
auto it = sent_queries_.find(id);
auto it = sent_queries_.find(message_id);
if (it == sent_queries_.end()) {
current_info_->connection_->force_ack();
return;
@ -983,8 +991,8 @@ void Session::on_message_result_error(uint64 id, int error_code, string message)
Query *query_ptr = &it->second;
VLOG(net_query) << "Return query error " << query_ptr->query;
cleanup_container(id, query_ptr);
mark_as_known(id, query_ptr);
cleanup_container(message_id, query_ptr);
mark_as_known(message_id, query_ptr);
query_ptr->query->set_error(Status::Error(error_code, message), current_info_->connection_->get_name().str());
query_ptr->query->set_message_id(0);
query_ptr->query->cancel_slot_.clear_event();
@ -993,18 +1001,18 @@ void Session::on_message_result_error(uint64 id, int error_code, string message)
sent_queries_.erase(it);
}
void Session::on_message_failed_inner(uint64 id, bool in_container) {
LOG(INFO) << "Message inner failed " << id;
auto it = sent_queries_.find(id);
void Session::on_message_failed_inner(uint64 message_id, bool in_container) {
LOG(INFO) << "Message inner failed " << message_id;
auto it = sent_queries_.find(message_id);
if (it == sent_queries_.end()) {
return;
}
Query *query_ptr = &it->second;
if (!in_container) {
cleanup_container(id, query_ptr);
cleanup_container(message_id, query_ptr);
}
mark_as_known(id, query_ptr);
mark_as_known(message_id, query_ptr);
query_ptr->query->set_message_id(0);
query_ptr->query->cancel_slot_.clear_event();
@ -1013,26 +1021,26 @@ void Session::on_message_failed_inner(uint64 id, bool in_container) {
sent_queries_.erase(it);
}
void Session::on_message_failed(uint64 id, Status status) {
LOG(INFO) << "Message failed: " << tag("id", id) << tag("status", status);
void Session::on_message_failed(uint64 message_id, Status status) {
LOG(INFO) << "Message failed: " << tag("message_id", message_id) << tag("status", status);
status.ignore();
auto cit = sent_containers_.find(id);
auto cit = sent_containers_.find(message_id);
if (cit != sent_containers_.end()) {
auto container_info = std::move(cit->second);
sent_containers_.erase(cit);
for (auto message_id : container_info.message_ids) {
on_message_failed_inner(message_id, true);
for (auto contained_message_id : container_info.message_ids) {
on_message_failed_inner(contained_message_id, true);
}
return;
}
on_message_failed_inner(id, false);
on_message_failed_inner(message_id, false);
}
void Session::on_message_info(uint64 id, int32 state, uint64 answer_id, int32 answer_size) {
auto it = sent_queries_.find(id);
void Session::on_message_info(uint64 message_id, int32 state, uint64 answer_message_id, int32 answer_size) {
auto it = sent_queries_.find(message_id);
if (it != sent_queries_.end()) {
if (it->second.query->update_is_ready()) {
dec_container(it->first, &it->second);
@ -1046,7 +1054,7 @@ void Session::on_message_info(uint64 id, int32 state, uint64 answer_id, int32 an
return;
}
}
if (id != 0) {
if (message_id != 0) {
if (it == sent_queries_.end()) {
return;
}
@ -1055,30 +1063,31 @@ void Session::on_message_info(uint64 id, int32 state, uint64 answer_id, int32 an
case 2:
case 3:
// message not received by server
return on_message_failed(id, Status::Error("Unknown message identifier"));
return on_message_failed(message_id, Status::Error("Unknown message identifier"));
case 0:
if (answer_id == 0) {
LOG(ERROR) << "Unexpected message_info.state == 0 " << tag("id", id) << tag("state", state)
<< tag("answer_id", answer_id);
return on_message_failed(id, Status::Error("Unexpected message_info.state == 0"));
if (answer_message_id == 0) {
LOG(ERROR) << "Unexpected message_info.state == 0 " << tag("message_id", message_id) << tag("state", state)
<< tag("answer_message_id", answer_message_id);
return on_message_failed(message_id, Status::Error("Unexpected message_info.state == 0"));
}
// fallthrough
case 4:
on_message_ack_impl(id, 2);
on_message_ack_impl(message_id, 2);
break;
default:
LOG(ERROR) << "Invalid message info " << tag("state", state);
}
}
// ok, we are waiting for result of id. let's ask to resend it
if (answer_id != 0) {
// ok, we are waiting for result of message_id. let's ask to resend it
if (answer_message_id != 0) {
if (it != sent_queries_.end()) {
VLOG_IF(net_query, id != 0) << "Resend answer " << tag("msg_id", id) << tag("answer_id", answer_id)
VLOG_IF(net_query, message_id != 0)
<< "Resend answer " << tag("message_id", message_id) << tag("answer_message_id", answer_message_id)
<< tag("answer_size", answer_size) << it->second.query;
it->second.query->debug("Session: resend answer");
}
current_info_->connection_->resend_answer(answer_id);
current_info_->connection_->resend_answer(answer_message_id);
}
}
@ -1152,9 +1161,10 @@ void Session::connection_send_query(ConnectionInfo *info, NetQueryPtr &&net_quer
message_id = auth_data_.next_message_id(Time::now_cached());
}
}
VLOG(net_query) << "Send query to connection " << net_query << " [msg_id:" << format::as_hex(message_id) << "]"
<< tag("invoke_after",
transform(invoke_after_ids, [](auto id) { return PSTRING() << format::as_hex(id); }));
VLOG(net_query) << "Send query to connection " << net_query << " [message_id:" << format::as_hex(message_id) << "]"
<< tag("invoke_after", transform(invoke_after_ids, [](auto message_id) {
return PSTRING() << format::as_hex(message_id);
}));
net_query->set_message_id(message_id);
net_query->cancel_slot_.clear_event();
{
@ -1257,7 +1267,7 @@ void Session::connection_open_finish(ConnectionInfo *info,
VLOG(dc) << "Change mode " << mode_ << "--->" << expected_mode;
mode_ = expected_mode;
if (info->connection_id_ == 1 && mode_ != Mode::Http) {
LOG(WARNING) << "Got tcp connection for long poll connection";
LOG(WARNING) << "Receive TCP connection for long poll connection";
connection_add(std::move(raw_connection));
info->state_ = ConnectionInfo::State::Empty;
yield();
@ -1269,14 +1279,14 @@ void Session::connection_open_finish(ConnectionInfo *info,
Slice mode_name;
if (mode_ == Mode::Tcp) {
mode = mtproto::SessionConnection::Mode::Tcp;
mode_name = Slice("Tcp");
mode_name = Slice("TCP");
} else {
if (info->connection_id_ == 0) {
mode = mtproto::SessionConnection::Mode::Http;
mode_name = Slice("Http");
mode_name = Slice("HTTP");
} else {
mode = mtproto::SessionConnection::Mode::HttpLongPoll;
mode_name = Slice("HttpLongPoll");
mode_name = Slice("LongPoll");
}
}
auto name = PSTRING() << get_name() << "::Connect::" << mode_name << "::" << raw_connection->extra().debug_str;
@ -1285,7 +1295,7 @@ void Session::connection_open_finish(ConnectionInfo *info,
if (can_destroy_auth_key()) {
info->connection_->destroy_key();
}
info->connection_->set_online(connection_online_flag_, is_main_);
info->connection_->set_online(connection_online_flag_, is_primary_);
info->connection_->set_name(name);
Scheduler::subscribe(info->connection_->get_poll_info().extract_pollable_fd(this));
info->mode_ = mode_;
@ -1298,11 +1308,11 @@ void Session::connection_open_finish(ConnectionInfo *info,
return;
}
if (info->ask_info_) {
for (auto &id : unknown_queries_) {
info->connection_->get_state_info(id);
for (auto &message_id : unknown_queries_) {
info->connection_->get_state_info(message_id);
}
for (auto &id : to_cancel_) {
info->connection_->cancel_answer(id);
for (auto &message_id : to_cancel_) {
info->connection_->cancel_answer(message_id);
}
to_cancel_.clear();
}

View File

@ -67,8 +67,8 @@ class Session final
};
Session(unique_ptr<Callback> callback, std::shared_ptr<AuthDataShared> shared_auth_data, int32 raw_dc_id, int32 dc_id,
bool is_main, bool use_pfs, bool is_cdn, bool need_destroy, const mtproto::AuthKey &tmp_auth_key,
const vector<mtproto::ServerSalt> &server_salts);
bool is_primary, bool is_main, bool use_pfs, bool is_cdn, bool need_destroy,
const mtproto::AuthKey &tmp_auth_key, const vector<mtproto::ServerSalt> &server_salts);
void send(NetQueryPtr &&query);
@ -78,7 +78,7 @@ class Session final
private:
struct Query final : private ListNode {
uint64 container_id;
uint64 container_message_id;
NetQueryPtr query;
bool ack = false;
@ -87,7 +87,7 @@ class Session final
int8 connection_id;
double sent_at_;
Query(uint64 message_id, NetQueryPtr &&q, int8 connection_id, double sent_at)
: container_id(message_id), query(std::move(q)), connection_id(connection_id), sent_at_(sent_at) {
: container_message_id(message_id), query(std::move(q)), connection_id(connection_id), sent_at_(sent_at) {
}
ListNode *get_list_node() {
@ -101,20 +101,21 @@ class Session final
// When connection is closed, mark all queries without ack as unknown.
// Ask state of all unknown queries when new connection is created.
//
// Just re-ask answer_id each time we get information about it.
// Just re-ask answer_message_id each time we get information about it.
// Though mtproto::Connection must ensure delivery of such query.
int32 raw_dc_id_; // numerical datacenter ID, i.e. 2
int32 dc_id_; // unique datacenter ID, i.e. -10002
enum class Mode : int8 { Tcp, Http } mode_ = Mode::Tcp;
bool is_main_; // true only for the primary Session(s) to the main DC
bool is_cdn_;
bool need_destroy_;
const int32 raw_dc_id_; // numerical datacenter ID, i.e. 2
const int32 dc_id_; // unique datacenter ID, i.e. -10002
const bool is_primary_; // true for primary Sessions to all DCs
const bool is_main_; // true only for the primary Session(s) to the main DC
const bool is_cdn_;
const bool need_destroy_;
bool was_on_network_ = false;
bool network_flag_ = false;
bool online_flag_ = false;
bool logging_out_flag_ = false;
bool connection_online_flag_ = false;
enum class Mode : int8 { Tcp, Http } mode_ = Mode::Tcp;
uint32 network_generation_ = 0;
uint64 being_binded_tmp_auth_key_id_ = 0;
uint64 being_checked_main_auth_key_id_ = 0;
@ -208,33 +209,33 @@ class Session final
void on_server_salt_updated() final;
void on_server_time_difference_updated() final;
void on_session_created(uint64 unique_id, uint64 first_id) final;
void on_session_created(uint64 unique_id, uint64 first_message_id) final;
void on_session_failed(Status status) final;
void on_container_sent(uint64 container_id, vector<uint64> msg_ids) final;
void on_container_sent(uint64 container_message_id, vector<uint64> message_ids) final;
Status on_update(BufferSlice packet) final;
void on_message_ack(uint64 id) final;
Status on_message_result_ok(uint64 id, BufferSlice packet, size_t original_size) final;
void on_message_result_error(uint64 id, int error_code, string message) final;
void on_message_failed(uint64 id, Status status) final;
void on_message_ack(uint64 message_id) final;
Status on_message_result_ok(uint64 message_id, BufferSlice packet, size_t original_size) final;
void on_message_result_error(uint64 message_id, int error_code, string message) final;
void on_message_failed(uint64 message_id, Status status) final;
void on_message_info(uint64 id, int32 state, uint64 answer_id, int32 answer_size) final;
void on_message_info(uint64 message_id, int32 state, uint64 answer_message_id, int32 answer_size) final;
Status on_destroy_auth_key() final;
void flush_pending_invoke_after_queries();
bool has_queries() const;
void dec_container(uint64 message_id, Query *query);
void cleanup_container(uint64 id, Query *query);
void mark_as_known(uint64 id, Query *query);
void mark_as_unknown(uint64 id, Query *query);
void dec_container(uint64 container_message_id, Query *query);
void cleanup_container(uint64 container_message_id, Query *query);
void mark_as_known(uint64 message_id, Query *query);
void mark_as_unknown(uint64 message_id, Query *query);
void on_message_ack_impl(uint64 id, int32 type);
void on_message_ack_impl_inner(uint64 id, int32 type, bool in_container);
void on_message_failed_inner(uint64 id, bool in_container);
void on_message_ack_impl(uint64 container_message_id, int32 type);
void on_message_ack_impl_inner(uint64 message_id, int32 type, bool in_container);
void on_message_failed_inner(uint64 message_id, bool in_container);
// send NetQueryPtr to parent
void return_query(NetQueryPtr &&query);
@ -268,7 +269,7 @@ class Session final
void raw_event(const Event::Raw &event) final;
friend StringBuilder &operator<<(StringBuilder &sb, Mode mode) {
return sb << (mode == Mode::Http ? "Http" : "Tcp");
return sb << (mode == Mode::Http ? "HTTP" : "TCP");
}
};
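
The comment block in the header above describes the bookkeeping behind the renamed message_id fields: each sent query is tracked by its message_id and the container_message_id it was sent in, unacknowledged queries become "unknown" when the connection closes, and their state is re-asked on the next connection. A minimal standalone sketch of that idea (not TDLib's actual types; all names are illustrative assumptions):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

// Toy bookkeeping only: remembers which container each query was sent in and
// which queries still need their state re-asked after a reconnect.
struct SentQuery {
  std::uint64_t container_message_id = 0;
  bool ack = false;
};

struct SessionBookkeeping {
  std::map<std::uint64_t, SentQuery> sent_queries;
  std::set<std::uint64_t> unknown_queries;

  void on_query_sent(std::uint64_t message_id, std::uint64_t container_message_id) {
    sent_queries[message_id] = SentQuery{container_message_id, false};
  }
  void on_message_ack(std::uint64_t message_id) {
    auto it = sent_queries.find(message_id);
    if (it != sent_queries.end()) {
      it->second.ack = true;
    }
  }
  void on_connection_closed() {
    for (const auto &query : sent_queries) {
      if (!query.second.ack) {
        unknown_queries.insert(query.first);  // state must be re-asked later
      }
    }
  }
};

int main() {
  SessionBookkeeping session;
  session.on_query_sent(/*message_id=*/101, /*container_message_id=*/100);
  session.on_query_sent(/*message_id=*/102, /*container_message_id=*/100);
  session.on_message_ack(101);
  session.on_connection_closed();
  for (auto message_id : session.unknown_queries) {
    std::cout << "re-ask state of message " << message_id << "\n";  // prints 102
  }
}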

View File

@ -18,14 +18,14 @@
namespace td {
SessionMultiProxy::SessionMultiProxy() = default;
SessionMultiProxy::~SessionMultiProxy() = default;
SessionMultiProxy::SessionMultiProxy(int32 session_count, std::shared_ptr<AuthDataShared> shared_auth_data,
bool is_main, bool use_pfs, bool allow_media_only, bool is_media, bool is_cdn,
bool need_destroy_auth_key)
bool is_primary, bool is_main, bool use_pfs, bool allow_media_only, bool is_media,
bool is_cdn, bool need_destroy_auth_key)
: session_count_(session_count)
, auth_data_(std::move(shared_auth_data))
, is_primary_(is_primary)
, is_main_(is_main)
, use_pfs_(use_pfs)
, allow_media_only_(allow_media_only)
@ -142,8 +142,8 @@ void SessionMultiProxy::init() {
int32 session_id_;
};
info.proxy = create_actor<SessionProxy>(name, make_unique<Callback>(actor_id(this), sessions_generation_, i),
auth_data_, is_main_, allow_media_only_, is_media_, get_pfs_flag(), is_cdn_,
need_destroy_auth_key_ && i == 0);
auth_data_, is_primary_, is_main_, allow_media_only_, is_media_,
get_pfs_flag(), is_cdn_, need_destroy_auth_key_ && i == 0);
sessions_.push_back(std::move(info));
}
}

View File

@ -19,12 +19,12 @@ class SessionProxy;
class SessionMultiProxy final : public Actor {
public:
SessionMultiProxy();
SessionMultiProxy(int32 session_count, std::shared_ptr<AuthDataShared> shared_auth_data, bool is_primary,
bool is_main, bool use_pfs, bool allow_media_only, bool is_media, bool is_cdn,
bool need_destroy_auth_key);
SessionMultiProxy(const SessionMultiProxy &other) = delete;
SessionMultiProxy &operator=(const SessionMultiProxy &other) = delete;
~SessionMultiProxy() final;
SessionMultiProxy(int32 session_count, std::shared_ptr<AuthDataShared> shared_auth_data, bool is_main, bool use_pfs,
bool allow_media_only, bool is_media, bool is_cdn, bool need_destroy_auth_key);
void send(NetQueryPtr query);
void update_main_flag(bool is_main);
@ -39,6 +39,7 @@ class SessionMultiProxy final : public Actor {
private:
int32 session_count_ = 0;
std::shared_ptr<AuthDataShared> auth_data_;
const bool is_primary_;
bool is_main_ = false;
bool use_pfs_ = false;
bool allow_media_only_ = false;

View File

@ -78,10 +78,11 @@ class SessionCallback final : public Session::Callback {
};
SessionProxy::SessionProxy(unique_ptr<Callback> callback, std::shared_ptr<AuthDataShared> shared_auth_data,
bool is_main, bool allow_media_only, bool is_media, bool use_pfs, bool is_cdn,
bool need_destroy)
bool is_primary, bool is_main, bool allow_media_only, bool is_media, bool use_pfs,
bool is_cdn, bool need_destroy)
: callback_(std::move(callback))
, auth_data_(std::move(shared_auth_data))
, is_primary_(is_primary)
, is_main_(is_main)
, allow_media_only_(allow_media_only)
, is_media_(is_media)
@ -212,7 +213,8 @@ void SessionProxy::open_session(bool force) {
session_ = create_actor<Session>(
name,
make_unique<SessionCallback>(actor_shared(this, session_generation_), dc_id, allow_media_only_, is_media_, hash),
auth_data_, raw_dc_id, int_dc_id, is_main_, use_pfs_, is_cdn_, need_destroy_, tmp_auth_key_, server_salts_);
auth_data_, raw_dc_id, int_dc_id, is_primary_, is_main_, use_pfs_, is_cdn_, need_destroy_, tmp_auth_key_,
server_salts_);
}
void SessionProxy::update_auth_key_state() {

View File

@ -29,8 +29,8 @@ class SessionProxy final : public Actor {
virtual void on_query_finished() = 0;
};
SessionProxy(unique_ptr<Callback> callback, std::shared_ptr<AuthDataShared> shared_auth_data, bool is_main,
bool allow_media_only, bool is_media, bool use_pfs, bool is_cdn, bool need_destroy);
SessionProxy(unique_ptr<Callback> callback, std::shared_ptr<AuthDataShared> shared_auth_data, bool is_primary,
bool is_main, bool allow_media_only, bool is_media, bool use_pfs, bool is_cdn, bool need_destroy);
void send(NetQueryPtr query);
void update_main_flag(bool is_main);
@ -41,6 +41,7 @@ class SessionProxy final : public Actor {
unique_ptr<Callback> callback_;
std::shared_ptr<AuthDataShared> auth_data_;
AuthKeyState auth_key_state_ = AuthKeyState::Empty;
const bool is_primary_;
bool is_main_;
bool allow_media_only_;
bool is_media_;

View File

@ -187,7 +187,7 @@ using tl_object_ptr = tl::unique_ptr<Type>;
* A function to create a dynamically allocated TL-object. Can be treated as an analogue of std::make_unique.
* Usage example:
* \code
* auto get_authorization_state_request = td::make_tl_object<td::td_api::getAuthorizationState>();
* auto get_me_request = td::make_tl_object<td::td_api::getMe>();
* auto message_text = td::make_tl_object<td::td_api::formattedText>("Hello, world!!!",
* td::td_api::array<td::tl_object_ptr<td::td_api::textEntity>>());
* auto send_message_request = td::make_tl_object<td::td_api::sendMessage>(chat_id, 0, 0, nullptr, nullptr,

View File

@ -79,8 +79,6 @@ class Actor : public ObserverBase {
std::shared_ptr<ActorContext> set_context(std::shared_ptr<ActorContext> context);
string set_tag(string tag);
void always_wait_for_mailbox();
// for ActorInfo mostly
void init(ObjectPool<ActorInfo>::OwnerPtr &&info);
ActorInfo *get_info();

View File

@ -164,8 +164,4 @@ inline Slice Actor::get_name() const {
return info_->get_name();
}
inline void Actor::always_wait_for_mailbox() {
info_->always_wait_for_mailbox();
}
} // namespace td

View File

@ -108,17 +108,11 @@ class ActorInfo final
bool need_context() const;
bool need_start_up() const;
void set_wait_generation(uint32 wait_generation);
bool must_wait(uint32 wait_generation) const;
void always_wait_for_mailbox();
private:
Deleter deleter_ = Deleter::None;
bool need_context_ = true;
bool need_start_up_ = true;
bool is_running_ = false;
bool always_wait_for_mailbox_{false};
uint32 wait_generation_{0};
std::atomic<int32> sched_id_{0};
Actor *actor_ = nullptr;

View File

@ -50,7 +50,6 @@ inline void ActorInfo::init(int32 sched_id, Slice name, ObjectPool<ActorInfo>::O
need_context_ = need_context;
need_start_up_ = need_start_up;
is_running_ = false;
wait_generation_ = 0;
}
inline bool ActorInfo::need_context() const {
@ -61,18 +60,6 @@ inline bool ActorInfo::need_start_up() const {
return need_start_up_;
}
inline void ActorInfo::set_wait_generation(uint32 wait_generation) {
wait_generation_ = wait_generation;
}
inline bool ActorInfo::must_wait(uint32 wait_generation) const {
return wait_generation_ == wait_generation || (always_wait_for_mailbox_ && !mailbox_.empty());
}
inline void ActorInfo::always_wait_for_mailbox() {
always_wait_for_mailbox_ = true;
}
inline void ActorInfo::on_actor_moved(Actor *actor_new_ptr) {
actor_ = actor_new_ptr;
}
@ -82,8 +69,8 @@ inline void ActorInfo::clear() {
CHECK(!actor_);
CHECK(!is_running());
CHECK(!is_migrating());
// NB: must be in non migrating state
// store invalid scheduler id.
// NB: must be in non-migrating state
// store invalid scheduler identifier
sched_id_.store((1 << 30) - 1, std::memory_order_relaxed);
VLOG(actor) << "Clear context " << context_.get() << " for " << get_name();
context_.reset();

View File

@ -200,8 +200,6 @@ class Scheduler {
template <ActorSendType send_type, class RunFuncT, class EventFuncT>
void send_impl(const ActorId<> &actor_id, const RunFuncT &run_func, const EventFuncT &event_func);
void inc_wait_generation();
Timestamp run_timeout();
void run_mailbox();
Timestamp run_events(Timestamp timeout);
@ -231,7 +229,6 @@ class Scheduler {
bool has_guard_ = false;
bool close_flag_ = false;
uint32 wait_generation_ = 1;
int32 sched_id_ = 0;
int32 sched_n_ = 0;
std::shared_ptr<MpscPollableQueue<EventFull>> inbound_queue_;

View File

@ -497,7 +497,6 @@ void Scheduler::run_mailbox() {
ListNode *node = actors_list.get();
CHECK(node);
auto actor_info = ActorInfo::from_list_node(node);
inc_wait_generation();
flush_mailbox(actor_info, static_cast<void (*)(ActorInfo *)>(nullptr), static_cast<Event (*)()>(nullptr));
}
VLOG(actor) << "Run mailbox : finish " << actor_count_;
@ -526,7 +525,6 @@ Timestamp Scheduler::run_timeout() {
while (!timeout_queue_.empty() && timeout_queue_.top_key() < now) {
HeapNode *node = timeout_queue_.pop();
ActorInfo *actor_info = ActorInfo::from_heap_node(node);
inc_wait_generation();
send<ActorSendType::Immediate>(actor_info->actor_id(), Event::timeout());
}
return get_timeout();

View File

@ -191,10 +191,6 @@ inline void Scheduler::before_tail_send(const ActorId<> &actor_id) {
// TODO
}
inline void Scheduler::inc_wait_generation() {
wait_generation_ += 2;
}
template <ActorSendType send_type, class RunFuncT, class EventFuncT>
void Scheduler::send_impl(const ActorId<> &actor_id, const RunFuncT &run_func, const EventFuncT &event_func) {
ActorInfo *actor_info = actor_id.get_actor_info();
@ -210,19 +206,12 @@ void Scheduler::send_impl(const ActorId<> &actor_id, const RunFuncT &run_func, c
CHECK(has_guard_ || !on_current_sched);
if (likely(send_type == ActorSendType::Immediate && on_current_sched && !actor_info->is_running() &&
!actor_info->must_wait(wait_generation_))) { // run immediately
if (likely(actor_info->mailbox_.empty())) {
actor_info->mailbox_.empty())) { // run immediately
EventGuard guard(this, actor_info);
run_func(actor_info);
} else {
flush_mailbox(actor_info, &run_func, &event_func);
}
} else {
if (on_current_sched) {
add_to_mailbox(actor_info, event_func());
if (send_type == ActorSendType::Later) {
actor_info->set_wait_generation(wait_generation_);
}
} else {
send_to_scheduler(actor_sched_id, actor_id, event_func());
}

View File

@ -571,7 +571,6 @@ TEST(Actors, stop_in_teardown) {
class AlwaysWaitForMailbox final : public td::Actor {
public:
void start_up() final {
always_wait_for_mailbox();
td::create_actor<td::SleepActor>("Sleep", 0.1,
td::PromiseCreator::lambda([actor_id = actor_id(this), ptr = this](td::Unit) {
td::send_closure(actor_id, &AlwaysWaitForMailbox::g);

View File

@ -120,7 +120,7 @@ class BinlogKeyValue final : public KeyValueSyncInterface {
SeqNo set(string key, string value) final {
auto lock = rw_mutex_.lock_write().move_as_ok();
uint64 old_id = 0;
uint64 old_event_id = 0;
auto it_ok = map_.emplace(key, std::make_pair(value, 0));
if (!it_ok.second) {
if (it_ok.first->second.first == value) {
@ -128,25 +128,25 @@ class BinlogKeyValue final : public KeyValueSyncInterface {
}
VLOG(binlog) << "Change value of key " << key << " from " << hex_encode(it_ok.first->second.first) << " to "
<< hex_encode(value);
old_id = it_ok.first->second.second;
old_event_id = it_ok.first->second.second;
it_ok.first->second.first = value;
} else {
VLOG(binlog) << "Set value of key " << key << " to " << hex_encode(value);
}
bool rewrite = false;
uint64 id;
auto seq_no = binlog_->next_id();
if (old_id != 0) {
uint64 event_id;
auto seq_no = binlog_->next_event_id();
if (old_event_id != 0) {
rewrite = true;
id = old_id;
event_id = old_event_id;
} else {
id = seq_no;
it_ok.first->second.second = id;
event_id = seq_no;
it_ok.first->second.second = event_id;
}
lock.reset();
add_event(seq_no,
BinlogEvent::create_raw(id, magic_, rewrite ? BinlogEvent::Flags::Rewrite : 0, Event{key, value}));
BinlogEvent::create_raw(event_id, magic_, rewrite ? BinlogEvent::Flags::Rewrite : 0, Event{key, value}));
return seq_no;
}
@ -157,11 +157,11 @@ class BinlogKeyValue final : public KeyValueSyncInterface {
return 0;
}
VLOG(binlog) << "Remove value of key " << key << ", which is " << hex_encode(it->second.first);
uint64 id = it->second.second;
uint64 event_id = it->second.second;
map_.erase(it);
auto seq_no = binlog_->next_id();
auto seq_no = binlog_->next_event_id();
lock.reset();
add_event(seq_no, BinlogEvent::create_raw(id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite,
add_event(seq_no, BinlogEvent::create_raw(event_id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite,
EmptyStorer()));
return seq_no;
}
@ -215,18 +215,18 @@ class BinlogKeyValue final : public KeyValueSyncInterface {
void erase_by_prefix(Slice prefix) final {
auto lock = rw_mutex_.lock_write().move_as_ok();
vector<uint64> ids;
vector<uint64> event_ids;
table_remove_if(map_, [&](const auto &it) {
if (begins_with(it.first, prefix)) {
ids.push_back(it.second.second);
event_ids.push_back(it.second.second);
return true;
}
return false;
});
auto seq_no = binlog_->next_id(narrow_cast<int32>(ids.size()));
auto seq_no = binlog_->next_event_id(narrow_cast<int32>(event_ids.size()));
lock.reset();
for (auto id : ids) {
add_event(seq_no, BinlogEvent::create_raw(id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite,
for (auto event_id : event_ids) {
add_event(seq_no, BinlogEvent::create_raw(event_id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite,
EmptyStorer()));
seq_no++;
}

View File

@ -208,8 +208,8 @@ Status Binlog::init(string path, const Callback &callback, DbKey db_key, DbKey o
close().ignore();
return status;
}
info_.last_id = processor_->last_id();
last_id_ = processor_->last_id();
info_.last_event_id = processor_->last_event_id();
last_event_id_ = processor_->last_event_id();
if (info_.wrong_password) {
close().ignore();
return Status::Error(static_cast<int>(Error::WrongPassword), "Wrong password");

View File

@ -31,7 +31,7 @@ extern int32 VERBOSITY_NAME(binlog);
struct BinlogInfo {
bool was_created{false};
uint64 last_id{0};
uint64 last_event_id{0};
bool is_encrypted{false};
bool wrong_password{false};
bool is_opened{false};
@ -57,16 +57,16 @@ class Binlog {
Status init(string path, const Callback &callback, DbKey db_key = DbKey::empty(), DbKey old_db_key = DbKey::empty(),
int32 dummy = -1, const Callback &debug_callback = Callback()) TD_WARN_UNUSED_RESULT;
uint64 next_id() {
return ++last_id_;
uint64 next_event_id() {
return ++last_event_id_;
}
uint64 next_id(int32 shift) {
auto res = last_id_ + 1;
last_id_ += shift;
uint64 next_event_id(int32 shift) {
auto res = last_event_id_ + 1;
last_event_id_ += shift;
return res;
}
uint64 peek_next_id() const {
return last_id_ + 1;
uint64 peek_next_event_id() const {
return last_event_id_ + 1;
}
bool empty() const {
@ -74,21 +74,21 @@ class Binlog {
}
uint64 add(int32 type, const Storer &storer) {
auto log_event_id = next_id();
add_raw_event(BinlogEvent::create_raw(log_event_id, type, 0, storer), {});
return log_event_id;
auto event_id = next_event_id();
add_raw_event(BinlogEvent::create_raw(event_id, type, 0, storer), {});
return event_id;
}
uint64 rewrite(uint64 log_event_id, int32 type, const Storer &storer) {
auto seq_no = next_id();
add_raw_event(BinlogEvent::create_raw(log_event_id, type, BinlogEvent::Flags::Rewrite, storer), {});
uint64 rewrite(uint64 event_id, int32 type, const Storer &storer) {
auto seq_no = next_event_id();
add_raw_event(BinlogEvent::create_raw(event_id, type, BinlogEvent::Flags::Rewrite, storer), {});
return seq_no;
}
uint64 erase(uint64 log_event_id) {
auto seq_no = next_id();
add_raw_event(BinlogEvent::create_raw(log_event_id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite,
EmptyStorer()),
uint64 erase(uint64 event_id) {
auto seq_no = next_event_id();
add_raw_event(
BinlogEvent::create_raw(event_id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite, EmptyStorer()),
{});
return seq_no;
}
@ -148,7 +148,7 @@ class Binlog {
unique_ptr<detail::BinlogEventsProcessor> processor_;
unique_ptr<detail::BinlogEventsBuffer> events_buffer_;
bool in_flush_events_buffer_{false};
uint64 last_id_{0};
uint64 last_event_id_{0};
double need_flush_since_ = 0;
bool need_sync_{false};
enum class State { Empty, Load, Reindex, Run } state_{State::Empty};
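
The renamed interface above allocates a fresh monotonic event identifier for every appended record, while rewrite and erase consume a new sequence number but target the original event_id. A toy in-memory model of that scheme (not TDLib's Binlog; names and behaviour are deliberately simplified for illustration):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Toy model: add() returns a new event_id; rewrite()/erase() occupy a new
// sequence number but update or remove the record stored under the old id.
class ToyBinlog {
 public:
  std::uint64_t next_event_id() {
    return ++last_event_id_;
  }
  std::uint64_t add(std::string payload) {
    auto event_id = next_event_id();
    events_[event_id] = std::move(payload);
    return event_id;
  }
  std::uint64_t rewrite(std::uint64_t event_id, std::string payload) {
    auto seq_no = next_event_id();  // the rewrite itself takes a new position
    events_[event_id] = std::move(payload);
    return seq_no;
  }
  std::uint64_t erase(std::uint64_t event_id) {
    auto seq_no = next_event_id();
    events_.erase(event_id);
    return seq_no;
  }
  void dump() const {
    for (const auto &event : events_) {
      std::cout << "event " << event.first << ": " << event.second << "\n";
    }
  }

 private:
  std::uint64_t last_event_id_ = 0;
  std::map<std::uint64_t, std::string> events_;
};

int main() {
  ToyBinlog binlog;
  auto a = binlog.add("key=a value=1");
  auto b = binlog.add("key=b value=2");
  binlog.rewrite(a, "key=a value=3");  // same event_id, new content
  binlog.erase(b);                     // event b disappears after compaction
  binlog.dump();                       // prints only event 1 with value=3
}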

View File

@ -31,34 +31,34 @@ class BinlogInterface {
void close_and_destroy(Promise<> promise = {}) {
close_and_destroy_impl(std::move(promise));
}
void add_raw_event(BinlogDebugInfo info, uint64 id, BufferSlice &&raw_event, Promise<> promise = Promise<>()) {
add_raw_event_impl(id, std::move(raw_event), std::move(promise), info);
void add_raw_event(BinlogDebugInfo info, uint64 event_id, BufferSlice &&raw_event, Promise<> promise = Promise<>()) {
add_raw_event_impl(event_id, std::move(raw_event), std::move(promise), info);
}
void add_raw_event(uint64 id, BufferSlice &&raw_event, Promise<> promise = Promise<>()) {
add_raw_event_impl(id, std::move(raw_event), std::move(promise), {});
void add_raw_event(uint64 event_id, BufferSlice &&raw_event, Promise<> promise = Promise<>()) {
add_raw_event_impl(event_id, std::move(raw_event), std::move(promise), {});
}
void lazy_sync(Promise<> promise = Promise<>()) {
add_raw_event_impl(next_id(), BufferSlice(), std::move(promise), {});
add_raw_event_impl(next_event_id(), BufferSlice(), std::move(promise), {});
}
uint64 add(int32 type, const Storer &storer, Promise<> promise = Promise<>()) {
auto log_event_id = next_id();
add_raw_event_impl(log_event_id, BinlogEvent::create_raw(log_event_id, type, 0, storer), std::move(promise), {});
return log_event_id;
auto event_id = next_event_id();
add_raw_event_impl(event_id, BinlogEvent::create_raw(event_id, type, 0, storer), std::move(promise), {});
return event_id;
}
uint64 rewrite(uint64 log_event_id, int32 type, const Storer &storer, Promise<> promise = Promise<>()) {
auto seq_no = next_id();
add_raw_event_impl(seq_no, BinlogEvent::create_raw(log_event_id, type, BinlogEvent::Flags::Rewrite, storer),
uint64 rewrite(uint64 event_id, int32 type, const Storer &storer, Promise<> promise = Promise<>()) {
auto seq_no = next_event_id();
add_raw_event_impl(seq_no, BinlogEvent::create_raw(event_id, type, BinlogEvent::Flags::Rewrite, storer),
std::move(promise), {});
return seq_no;
}
uint64 erase(uint64 log_event_id, Promise<> promise = Promise<>()) {
auto seq_no = next_id();
add_raw_event_impl(seq_no,
BinlogEvent::create_raw(log_event_id, BinlogEvent::ServiceTypes::Empty,
BinlogEvent::Flags::Rewrite, EmptyStorer()),
uint64 erase(uint64 event_id, Promise<> promise = Promise<>()) {
auto seq_no = next_event_id();
add_raw_event_impl(
seq_no,
BinlogEvent::create_raw(event_id, BinlogEvent::ServiceTypes::Empty, BinlogEvent::Flags::Rewrite, EmptyStorer()),
std::move(promise), {});
return seq_no;
}
@ -67,13 +67,13 @@ class BinlogInterface {
virtual void force_flush() = 0;
virtual void change_key(DbKey db_key, Promise<> promise) = 0;
virtual uint64 next_id() = 0;
virtual uint64 next_id(int32 shift) = 0;
virtual uint64 next_event_id() = 0;
virtual uint64 next_event_id(int32 shift) = 0;
protected:
virtual void close_impl(Promise<> promise) = 0;
virtual void close_and_destroy_impl(Promise<> promise) = 0;
virtual void add_raw_event_impl(uint64 id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) = 0;
virtual void add_raw_event_impl(uint64 seq_no, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) = 0;
};
} // namespace td

View File

@ -40,7 +40,7 @@ class BinlogActor final : public Actor {
BinlogDebugInfo debug_info;
};
void add_raw_event(uint64 seq_no, BufferSlice &&raw_event, Promise<> &&promise, BinlogDebugInfo info) {
processor_.add(seq_no, Event{std::move(raw_event), std::move(promise), info}, [&](uint64 id, Event &&event) {
processor_.add(seq_no, Event{std::move(raw_event), std::move(promise), info}, [&](uint64 event_id, Event &&event) {
if (!event.raw_event.empty()) {
do_add_raw_event(std::move(event.raw_event), event.debug_info);
}
@ -178,9 +178,9 @@ Result<BinlogInfo> ConcurrentBinlog::init(string path, const Callback &callback,
void ConcurrentBinlog::init_impl(unique_ptr<Binlog> binlog, int32 scheduler_id) {
path_ = binlog->get_path().str();
last_id_ = binlog->peek_next_id();
last_event_id_ = binlog->peek_next_event_id();
binlog_actor_ = create_actor_on_scheduler<detail::BinlogActor>(PSLICE() << "Binlog " << path_, scheduler_id,
std::move(binlog), last_id_);
std::move(binlog), last_event_id_);
}
void ConcurrentBinlog::close_impl(Promise<> promise) {
@ -189,8 +189,10 @@ void ConcurrentBinlog::close_impl(Promise<> promise) {
void ConcurrentBinlog::close_and_destroy_impl(Promise<> promise) {
send_closure(std::move(binlog_actor_), &detail::BinlogActor::close_and_destroy, std::move(promise));
}
void ConcurrentBinlog::add_raw_event_impl(uint64 id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) {
send_closure(binlog_actor_, &detail::BinlogActor::add_raw_event, id, std::move(raw_event), std::move(promise), info);
void ConcurrentBinlog::add_raw_event_impl(uint64 event_id, BufferSlice &&raw_event, Promise<> promise,
BinlogDebugInfo info) {
send_closure(binlog_actor_, &detail::BinlogActor::add_raw_event, event_id, std::move(raw_event), std::move(promise),
info);
}
void ConcurrentBinlog::force_sync(Promise<> promise) {
send_closure(binlog_actor_, &detail::BinlogActor::force_sync, std::move(promise));

View File

@ -45,11 +45,11 @@ class ConcurrentBinlog final : public BinlogInterface {
void force_flush() final;
void change_key(DbKey db_key, Promise<> promise) final;
uint64 next_id() final {
return last_id_.fetch_add(1, std::memory_order_relaxed);
uint64 next_event_id() final {
return last_event_id_.fetch_add(1, std::memory_order_relaxed);
}
uint64 next_id(int32 shift) final {
return last_id_.fetch_add(shift, std::memory_order_relaxed);
uint64 next_event_id(int32 shift) final {
return last_event_id_.fetch_add(shift, std::memory_order_relaxed);
}
CSlice get_path() const {
@ -60,11 +60,11 @@ class ConcurrentBinlog final : public BinlogInterface {
void init_impl(unique_ptr<Binlog> binlog, int scheduler_id);
void close_impl(Promise<> promise) final;
void close_and_destroy_impl(Promise<> promise) final;
void add_raw_event_impl(uint64 id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) final;
void add_raw_event_impl(uint64 event_id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) final;
ActorOwn<detail::BinlogActor> binlog_actor_;
string path_;
std::atomic<uint64> last_id_{0};
std::atomic<uint64> last_event_id_{0};
};
} // namespace td

View File

@ -44,20 +44,20 @@ struct Trie {
};
td::vector<FullNode> nodes_;
void do_add(int id, td::Slice value) {
nodes_[id].sum++;
void do_add(int event_id, td::Slice value) {
nodes_[event_id].sum++;
if (value.empty()) {
return;
}
auto c = static_cast<td::uint8>(value[0]);
auto next_id = nodes_[id].next[c];
if (next_id == 0) {
next_id = static_cast<int>(nodes_.size());
auto next_event_id = nodes_[event_id].next[c];
if (next_event_id == 0) {
next_event_id = static_cast<int>(nodes_.size());
nodes_.emplace_back();
nodes_[id].next[c] = next_id;
nodes_[event_id].next[c] = next_event_id;
}
do_add(next_id, value.substr(1));
do_add(next_event_id, value.substr(1));
}
void do_dump(td::string path, int v) {
@ -84,11 +84,11 @@ struct Trie {
return;
}
for (int c = 0; c < 256; c++) {
auto next_id = nodes_[v].next[c];
if (next_id == 0) {
auto next_event_id = nodes_[v].next[c];
if (next_event_id == 0) {
continue;
}
do_dump(path + static_cast<char>(c), next_id);
do_dump(path + static_cast<char>(c), next_event_id);
}
}
};
@ -137,9 +137,10 @@ int main(int argc, char *argv[]) {
auto key = td::TlParser(event.data_).fetch_string<td::Slice>();
info[event.type_].trie.add(key);
}
LOG(PLAIN) << "LogEvent[" << td::tag("id", td::format::as_hex(event.id_)) << td::tag("type", event.type_)
<< td::tag("flags", event.flags_) << td::tag("size", event.data_.size())
<< td::tag("data", td::format::escaped(event.data_)) << "]\n";
LOG(PLAIN) << "LogEvent[" << td::tag("event_id", td::format::as_hex(event.id_))
<< td::tag("type", event.type_) << td::tag("flags", event.flags_)
<< td::tag("size", event.data_.size()) << td::tag("data", td::format::escaped(event.data_))
<< "]\n";
})
.ensure();

View File

@ -16,13 +16,13 @@ namespace detail {
Status BinlogEventsProcessor::do_event(BinlogEvent &&event) {
offset_ = event.offset_;
auto fixed_id = event.id_ * 2;
if ((event.flags_ & BinlogEvent::Flags::Rewrite) && !ids_.empty() && ids_.back() >= fixed_id) {
auto it = std::lower_bound(ids_.begin(), ids_.end(), fixed_id);
if (it == ids_.end() || *it != fixed_id) {
auto fixed_event_id = event.id_ * 2;
if ((event.flags_ & BinlogEvent::Flags::Rewrite) && !event_ids_.empty() && event_ids_.back() >= fixed_event_id) {
auto it = std::lower_bound(event_ids_.begin(), event_ids_.end(), fixed_event_id);
if (it == event_ids_.end() || *it != fixed_event_id) {
return Status::Error(PSLICE() << "Ignore rewrite log event " << event.public_to_string());
}
auto pos = it - ids_.begin();
auto pos = it - event_ids_.begin();
total_raw_events_size_ -= static_cast<int64>(events_[pos].raw_event_.size());
if (event.type_ == BinlogEvent::ServiceTypes::Empty) {
*it += 1;
@ -36,15 +36,15 @@ Status BinlogEventsProcessor::do_event(BinlogEvent &&event) {
} else if (event.type_ < 0) {
// just skip service events
} else {
if (!(ids_.empty() || ids_.back() < fixed_id)) {
return Status::Error(PSLICE() << offset_ << " " << ids_.size() << " " << ids_.back() << " " << fixed_id << " "
<< event.public_to_string() << " " << total_events_ << " "
if (!(event_ids_.empty() || event_ids_.back() < fixed_event_id)) {
return Status::Error(PSLICE() << offset_ << " " << event_ids_.size() << " " << event_ids_.back() << " "
<< fixed_event_id << " " << event.public_to_string() << " " << total_events_ << " "
<< total_raw_events_size_);
}
last_id_ = event.id_;
last_event_id_ = event.id_;
total_raw_events_size_ += static_cast<int64>(event.raw_event_.size());
total_events_++;
ids_.push_back(fixed_id);
event_ids_.push_back(fixed_event_id);
events_.emplace_back(std::move(event));
}
@ -55,22 +55,22 @@ Status BinlogEventsProcessor::do_event(BinlogEvent &&event) {
}
void BinlogEventsProcessor::compactify() {
CHECK(ids_.size() == events_.size());
auto ids_from = ids_.begin();
auto ids_to = ids_from;
CHECK(event_ids_.size() == events_.size());
auto event_ids_from = event_ids_.begin();
auto event_ids_to = event_ids_from;
auto events_from = events_.begin();
auto events_to = events_from;
for (; ids_from != ids_.end(); ids_from++, events_from++) {
if ((*ids_from & 1) == 0) {
*ids_to++ = *ids_from;
for (; event_ids_from != event_ids_.end(); event_ids_from++, events_from++) {
if ((*event_ids_from & 1) == 0) {
*event_ids_to++ = *event_ids_from;
*events_to++ = std::move(*events_from);
}
}
ids_.erase(ids_to, ids_.end());
event_ids_.erase(event_ids_to, event_ids_.end());
events_.erase(events_to, events_.end());
total_events_ = ids_.size();
total_events_ = event_ids_.size();
empty_events_ = 0;
CHECK(ids_.size() == events_.size());
CHECK(event_ids_.size() == events_.size());
}
} // namespace detail
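
The invariant behind the renamed event_ids_ vector: each stored value is event_id * 2, and the low bit is flipped to 1 when the event is later rewritten as Empty, so compactify() only has to keep the entries whose low bit is still 0 (the real code erases the parallel events_ vector in the same pass). A standalone sketch of that tagging scheme, not TDLib code:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Each entry holds event_id * 2; the low bit marks "was deleted".
  std::vector<std::uint64_t> event_ids = {10 * 2, 11 * 2, 12 * 2};
  event_ids[1] |= 1;  // event 11 was rewritten as Empty

  // compactify(): drop every entry whose low bit is set.
  event_ids.erase(std::remove_if(event_ids.begin(), event_ids.end(),
                                 [](std::uint64_t id) { return (id & 1) != 0; }),
                  event_ids.end());

  assert(event_ids.size() == 2);
  assert(event_ids[0] / 2 == 10 && event_ids[1] / 2 == 12);
}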

View File

@ -23,17 +23,18 @@ class BinlogEventsProcessor {
template <class CallbackT>
void for_each(CallbackT &&callback) {
for (size_t i = 0; i < ids_.size(); i++) {
LOG_CHECK(i == 0 || ids_[i - 1] < ids_[i]) << ids_[i - 1] << " " << events_[i - 1].public_to_string() << " "
<< ids_[i] << " " << events_[i].public_to_string();
if ((ids_[i] & 1) == 0) {
for (size_t i = 0; i < event_ids_.size(); i++) {
LOG_CHECK(i == 0 || event_ids_[i - 1] < event_ids_[i])
<< event_ids_[i - 1] << " " << events_[i - 1].public_to_string() << " " << event_ids_[i] << " "
<< events_[i].public_to_string();
if ((event_ids_[i] & 1) == 0) {
callback(events_[i]);
}
}
}
uint64 last_id() const {
return last_id_;
uint64 last_event_id() const {
return last_event_id_;
}
int64 offset() const {
return offset_;
@ -43,12 +44,12 @@ class BinlogEventsProcessor {
}
private:
// holds (id * 2 + was_deleted)
std::vector<uint64> ids_;
// holds (event_id * 2 + was_deleted)
std::vector<uint64> event_ids_;
std::vector<BinlogEvent> events_;
size_t total_events_{0};
size_t empty_events_{0};
uint64 last_id_{0};
uint64 last_event_id_{0};
int64 offset_{0};
int64 total_raw_events_size_{0};

View File

@ -2,7 +2,7 @@ if ((CMAKE_MAJOR_VERSION LESS 3) OR (CMAKE_VERSION VERSION_LESS "3.0.2"))
message(FATAL_ERROR "CMake >= 3.0.2 is required")
endif()
option(TDUTILS_MIME_TYPE "Generate mime types conversion; requires gperf" ON)
option(TDUTILS_MIME_TYPE "Generate MIME types conversion; requires gperf" ON)
if (NOT DEFINED CMAKE_INSTALL_LIBDIR)
set(CMAKE_INSTALL_LIBDIR "lib")

View File

@ -155,7 +155,7 @@ class Container {
void release(int32 id) {
inc_generation(id);
slots_[id].data = DataT();
if (slots_[id].generation & ~TYPE_MASK) { // generation overflow. Can't use this id anymore
if (slots_[id].generation & ~TYPE_MASK) { // generation overflow. Can't use this identifier anymore
empty_slots_.push_back(id);
}
}

View File

@ -13,7 +13,7 @@
namespace td {
// Process states in order defined by their Id
// Process states in order defined by their SeqNo
template <class DataT>
class OrderedEventsProcessor {
public:

View File

@ -80,7 +80,7 @@ Logger::Logger(LogInterface &log, const LogOptions &options, int log_level, Slic
}
sb_ << ']';
// thread id
// thread identifier
auto thread_id = get_thread_id();
sb_ << "[t";
if (static_cast<uint32>(thread_id) < 10) {

View File

@ -199,7 +199,9 @@ Result<FileFd> FileFd::open(CSlice filepath, int32 flags, int32 mode) {
} else {
creation_disposition = OPEN_EXISTING;
}
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
native_flags |= FILE_FLAG_OPEN_REPARSE_POINT;
#endif
}
if (flags & Direct) {

View File

@ -600,12 +600,12 @@ class UdpSocketFdImpl {
case EBADF: // impossible
case ENOTSOCK: // impossible
case EPIPE: // impossible for udp
case ECONNRESET: // impossible for udp
case EPIPE: // impossible for UDP
case ECONNRESET: // impossible for UDP
case EDESTADDRREQ: // we checked that address is valid
case ENOTCONN: // we checked that address is valid
case EINTR: // we already skipped all EINTR
case EISCONN: // impossible for udp socket
case EISCONN: // impossible for UDP socket
case EOPNOTSUPP:
case ENOTDIR:
case EFAULT:

View File

@ -20,7 +20,6 @@
#include <memory>
namespace td {
// Udp and errors
namespace detail {
class UdpSocketFdImpl;
class UdpSocketFdImplDeleter {

View File

@ -14,14 +14,23 @@
#else
#include <unistd.h>
#endif
#else
#include "td/utils/port/Clocks.h"
#endif
namespace td {
void usleep_for(int32 microseconds) {
#if TD_PORT_WINDOWS
if (microseconds < 2000) {
auto end_time = Clocks::monotonic() + microseconds * 1e-6;
do {
SwitchToThread();
} while (Clocks::monotonic() < end_time);
} else {
int32 milliseconds = microseconds / 1000 + (microseconds % 1000 ? 1 : 0);
Sleep(milliseconds);
}
#else
#if _POSIX_C_SOURCE >= 199309L
timespec ts;
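
The new Windows branch above is a latency workaround: Sleep() only has roughly millisecond granularity, so requests shorter than 2 ms spin on SwitchToThread() against a monotonic deadline, while longer requests round the duration up to whole milliseconds. A portable sketch of the same spin-or-sleep idea (standard C++ threads, not the TDLib implementation):

#include <chrono>
#include <cstdint>
#include <thread>

// Illustrative helper, not TDLib's usleep_for: spin-yield for very short
// waits, fall back to a coarse sleep otherwise.
void short_sleep(std::int32_t microseconds) {
  using clock = std::chrono::steady_clock;
  if (microseconds < 2000) {
    auto end_time = clock::now() + std::chrono::microseconds(microseconds);
    do {
      std::this_thread::yield();  // analogous to SwitchToThread()
    } while (clock::now() < end_time);
  } else {
    // Round up to whole milliseconds, matching the rounding above.
    auto milliseconds = microseconds / 1000 + (microseconds % 1000 ? 1 : 0);
    std::this_thread::sleep_for(std::chrono::milliseconds(milliseconds));
  }
}

int main() {
  short_sleep(500);   // spins for ~0.5 ms
  short_sleep(5000);  // sleeps for 5 ms
}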

View File

@ -46,10 +46,12 @@ class CheckedHeap {
nodes[i].value = i;
}
}
static void xx(int key, const td::HeapNode *heap_node) {
const Node *node = static_cast<const Node *>(heap_node);
std::fprintf(stderr, "(%d;%d)", node->key, node->value);
}
void check() const {
for (auto p : set_heap) {
std::fprintf(stderr, "(%d;%d)", p.first, p.second);
@ -59,13 +61,16 @@ class CheckedHeap {
std::fprintf(stderr, "\n");
kheap.check();
}
int random_id() const {
CHECK(!empty());
return ids[td::Random::fast(0, static_cast<int>(ids.size() - 1))];
}
std::size_t size() const {
return ids.size();
}
bool empty() const {
return ids.empty();
}
@ -77,8 +82,8 @@ class CheckedHeap {
ASSERT_EQ(res, kheap.top_key());
return res;
}
int insert(int key) {
// std::fprintf(stderr, "insert %d\n", key);
int id;
if (free_ids.empty()) {
UNREACHABLE();
@ -96,15 +101,15 @@ class CheckedHeap {
set_heap.emplace(key, id);
return id;
}
void fix_key(int new_key, int id) {
// std::fprintf(stderr, "fix key %d %d (old_key = %d)\n", new_key, id, nodes[id].key);
set_heap.erase(std::make_pair(nodes[id].key, id));
nodes[id].key = new_key;
kheap.fix(new_key, &nodes[id]);
set_heap.emplace(new_key, id);
}
void erase(int id) {
// std::fprintf(stderr, "erase %d\n", id);
int pos = rev_ids[id];
CHECK(pos != -1);
ids[pos] = ids.back();
@ -116,8 +121,8 @@ class CheckedHeap {
kheap.erase(&nodes[id]);
set_heap.erase(std::make_pair(nodes[id].key, id));
}
void pop() {
// std::fprintf(stderr, "pop\n");
CHECK(!empty());
Node *node = static_cast<Node *>(kheap.pop());
int id = node->value;

View File

@ -11,6 +11,7 @@ set(TD_TEST_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/message_entities.cpp
${CMAKE_CURRENT_SOURCE_DIR}/mtproto.cpp
${CMAKE_CURRENT_SOURCE_DIR}/poll.cpp
${CMAKE_CURRENT_SOURCE_DIR}/query_merger.cpp
${CMAKE_CURRENT_SOURCE_DIR}/secret.cpp
${CMAKE_CURRENT_SOURCE_DIR}/secure_storage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/set_with_position.cpp

View File

@ -75,17 +75,17 @@ TEST(DB, binlog_encryption) {
{
td::Binlog binlog;
binlog.init(binlog_name.str(), [](const td::BinlogEvent &x) {}).ensure();
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_id(), 1, 0, td::create_storer("AAAA")),
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_event_id(), 1, 0, td::create_storer("AAAA")),
td::BinlogDebugInfo{__FILE__, __LINE__});
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_id(), 1, 0, td::create_storer("BBBB")),
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_event_id(), 1, 0, td::create_storer("BBBB")),
td::BinlogDebugInfo{__FILE__, __LINE__});
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_id(), 1, 0, td::create_storer(long_data)),
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_event_id(), 1, 0, td::create_storer(long_data)),
td::BinlogDebugInfo{__FILE__, __LINE__});
LOG(INFO) << "SET PASSWORD";
binlog.change_key(cucumber);
binlog.change_key(hello);
LOG(INFO) << "OK";
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_id(), 1, 0, td::create_storer("CCCC")),
binlog.add_raw_event(td::BinlogEvent::create_raw(binlog.next_event_id(), 1, 0, td::create_storer("CCCC")),
td::BinlogDebugInfo{__FILE__, __LINE__});
binlog.close().ensure();
}

View File

@ -221,14 +221,12 @@ class InitTask : public Task {
private:
Options options_;
td::Promise<> promise_;
bool start_flag_{false};
void start_up() override {
send_query(td::make_tl_object<td::td_api::getAuthorizationState>(),
[this](auto res) { this->process_authorization_state(res.move_as_ok()); });
send_query(td::make_tl_object<td::td_api::getOption>("version"),
[](auto res) { LOG(INFO) << td::td_api::to_string(res.ok()); });
}
void process_authorization_state(td::tl_object_ptr<td::td_api::Object> authorization_state) {
start_flag_ = true;
td::tl_object_ptr<td::td_api::Function> function;
switch (authorization_state->get_id()) {
case td::td_api::authorizationStateReady::ID:
@ -267,9 +265,6 @@ class InitTask : public Task {
});
}
void process_update(std::shared_ptr<TestClient::Update> update) override {
if (!start_flag_) {
return;
}
if (!update->object) {
return;
}

test/query_merger.cpp Normal file
View File

@ -0,0 +1,100 @@
//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2023
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include "td/telegram/QueryMerger.h"
#include "td/actor/actor.h"
#include "td/actor/ConcurrentScheduler.h"
#include "td/actor/SleepActor.h"
#include "td/utils/common.h"
#include "td/utils/FlatHashSet.h"
#include "td/utils/Random.h"
#include "td/utils/tests.h"
#include <queue>
class TestQueryMerger : public td::Actor {
void start_up() override {
query_merger_.set_merge_function([this](td::vector<td::int64> query_ids, td::Promise<td::Unit> &&promise) {
ASSERT_TRUE(!query_ids.empty());
ASSERT_EQ(query_ids.size(), td::min(next_query_ids_.size(), MAX_MERGED_QUERY_COUNT));
for (auto query_id : query_ids) {
auto next_query_id = next_query_ids_.front();
next_query_ids_.pop();
ASSERT_EQ(query_id, next_query_id);
}
current_query_count_++;
ASSERT_TRUE(current_query_count_ <= MAX_CONCURRENT_QUERY_COUNT);
if (!next_query_ids_.empty()) {
ASSERT_EQ(current_query_count_, MAX_CONCURRENT_QUERY_COUNT);
}
td::create_actor<td::SleepActor>("CompleteMergeQuery", 0.02,
td::PromiseCreator::lambda([this, query_ids, promise = std::move(promise)](
td::Result<td::Unit> result) mutable {
for (auto query_id : query_ids) {
LOG(INFO) << "Complete " << query_id;
bool is_erased = pending_query_ids_.erase(query_id);
ASSERT_TRUE(is_erased);
}
current_query_count_--;
promise.set_result(std::move(result));
}))
.release();
yield();
});
loop();
}
void loop() {
std::size_t query_count = 0;
std::size_t added_queries = td::Random::fast(1, 3);
while (query_count++ < added_queries && total_query_count_++ < MAX_QUERY_COUNT) {
td::int64 query_id = td::Random::fast(1, 20);
if (pending_query_ids_.insert(query_id).second) {
next_query_ids_.push(query_id);
}
query_merger_.add_query(query_id,
td::PromiseCreator::lambda([this, query_id](td::Result<td::Unit> result) mutable {
completed_query_count_++;
if (completed_query_count_ == MAX_QUERY_COUNT) {
ASSERT_EQ(current_query_count_, 0u);
ASSERT_TRUE(next_query_ids_.empty());
ASSERT_TRUE(pending_query_ids_.empty());
td::Scheduler::instance()->finish();
} else {
yield();
}
}));
}
}
static constexpr std::size_t MAX_CONCURRENT_QUERY_COUNT = 5;
static constexpr std::size_t MAX_MERGED_QUERY_COUNT = 3;
static constexpr std::size_t MAX_QUERY_COUNT = 1000;
td::QueryMerger query_merger_{"QueryMerger", MAX_CONCURRENT_QUERY_COUNT, MAX_MERGED_QUERY_COUNT};
std::size_t current_query_count_ = 0;
std::size_t total_query_count_ = 0;
std::size_t completed_query_count_ = 0;
std::queue<td::int64> next_query_ids_;
td::FlatHashSet<td::int64> pending_query_ids_;
};
constexpr std::size_t TestQueryMerger::MAX_CONCURRENT_QUERY_COUNT;
constexpr std::size_t TestQueryMerger::MAX_MERGED_QUERY_COUNT;
constexpr std::size_t TestQueryMerger::MAX_QUERY_COUNT;
TEST(QueryMerger, stress) {
td::ConcurrentScheduler sched(0, 0);
sched.create_actor_unsafe<TestQueryMerger>(0, "TestQueryMerger").release();
sched.start();
while (sched.run_main(10)) {
// empty
}
sched.finish();
}
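
For readers new to the class under test: the sequence exercised above (construct with a name and two limits, install a merge function once, then feed individual query identifiers) is the entire surface the test relies on. A hedged usage sketch distilled from it — QueryMerger is TDLib-internal, ChatLoader, get_chat and the limit values are hypothetical, the merge function is stubbed out, and the includes mirror the test above:

#include "td/telegram/QueryMerger.h"

#include "td/actor/actor.h"

#include "td/utils/common.h"

#include <utility>

// ChatLoader and get_chat are illustrative names, not TDLib code.
class ChatLoader final : public td::Actor {
 public:
  void get_chat(td::int64 chat_id, td::Promise<td::Unit> promise) {
    // Queries with the same identifier are coalesced; at most 2 merged
    // requests of up to 100 identifiers each are kept in flight.
    chat_loader_.add_query(chat_id, std::move(promise));
  }

 private:
  td::QueryMerger chat_loader_{"ChatLoader", /*max_concurrent_query_count=*/2,
                               /*max_merged_query_count=*/100};

  void start_up() override {
    chat_loader_.set_merge_function(
        [](td::vector<td::int64> chat_ids, td::Promise<td::Unit> &&promise) {
          // A single batched request would be sent here for all chat_ids;
          // completing this promise completes every merged add_query() promise.
          promise.set_value(td::Unit());
        });
  }
};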

View File

@ -382,14 +382,14 @@ class FakeBinlog final
void force_flush() final {
}
uint64 next_id() final {
auto res = last_id_;
last_id_++;
uint64 next_event_id() final {
auto res = last_event_id_;
last_event_id_++;
return res;
}
uint64 next_id(int32 shift) final {
auto res = last_id_;
last_id_ += shift;
uint64 next_event_id(int32 shift) final {
auto res = last_event_id_;
last_event_id_ += shift;
return res;
}
template <class F>
@ -420,7 +420,7 @@ class FakeBinlog final
}
void close_and_destroy_impl(Promise<> promise) final {
}
void add_raw_event_impl(uint64 id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) final {
void add_raw_event_impl(uint64 event_id, BufferSlice &&raw_event, Promise<> promise, BinlogDebugInfo info) final {
auto event = BinlogEvent(std::move(raw_event), info);
LOG(INFO) << "ADD EVENT: " << event.id_ << " " << event;
pending_events_.emplace_back();
@ -464,7 +464,7 @@ class FakeBinlog final
}
}
bool has_request_sync = false;
uint64 last_id_ = 1;
uint64 last_event_id_ = 1;
detail::BinlogEventsProcessor events_processor_;
struct PendingEvent {

View File

@ -204,11 +204,10 @@ class DoAuthentication final : public TestClinetTask {
: name_(std::move(name)), phone_(std::move(phone)), code_(std::move(code)), promise_(std::move(promise)) {
}
void start_up() final {
send_query(td::make_tl_object<td::td_api::getAuthorizationState>(),
[this](auto res) { this->process_authorization_state(std::move(res)); });
send_query(td::make_tl_object<td::td_api::getOption>("version"),
[](auto res) { LOG(INFO) << td::td_api::to_string(res); });
}
void process_authorization_state(td::tl_object_ptr<td::td_api::Object> authorization_state) {
start_flag_ = true;
td::tl_object_ptr<td::td_api::Function> function;
switch (authorization_state->get_id()) {
case td::td_api::authorizationStateWaitPhoneNumber::ID:
@ -261,12 +260,8 @@ class DoAuthentication final : public TestClinetTask {
td::string phone_;
td::string code_;
td::Promise<> promise_;
bool start_flag_{false};
void process_update(std::shared_ptr<TestClient::Update> update) final {
if (!start_flag_) {
return;
}
if (!update->object) {
return;
}