Migrate from Atomix to Kafka

Andrea Cavalli 2022-06-27 00:06:53 +02:00
parent 77593c2722
commit 6339f78db8
45 changed files with 908 additions and 1509 deletions

pom.xml

@@ -9,7 +9,6 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<revision>0-SNAPSHOT</revision>
<atomix.version>3.1.12</atomix.version>
<record.builder.version>33</record.builder.version>
</properties>
<repositories>
@@ -108,31 +107,6 @@
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
</dependency>
<dependency>
<groupId>io.atomix</groupId>
<artifactId>atomix</artifactId>
<version>${atomix.version}</version>
<exclusions>
<exclusion>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.atomix</groupId>
<artifactId>atomix-raft</artifactId>
<version>${atomix.version}</version>
</dependency>
<dependency>
<groupId>io.atomix</groupId>
<artifactId>atomix-primary-backup</artifactId>
<version>${atomix.version}</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>

Address.java

@@ -1,27 +0,0 @@
package it.tdlight.reactiveapi;
import java.util.Arrays;
public record Address(String host, int port) {
public Address {
if (host.isBlank()) {
throw new IllegalArgumentException("Host is blank");
}
if (port < 0) {
throw new IndexOutOfBoundsException(port);
}
if (port >= 65536) {
throw new IndexOutOfBoundsException(port);
}
}
public static Address fromString(String address) {
var parts = address.split(":");
if (parts.length < 2) {
throw new IllegalArgumentException("Malformed client address, it must have a port (host:port)");
}
var host = String.join(":", Arrays.copyOf(parts, parts.length - 1));
var port = Integer.parseUnsignedInt(parts[parts.length - 1]);
return new Address(host, port);
}
}
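Note: the deleted parser took the port from the last ":"-separated token and re-joined the rest, so hosts that themselves contain colons survived the split. A minimal sketch of the removed behavior (values are hypothetical):

  var plain = Address.fromString("example.org:8080");       // host "example.org", port 8080
  var ipv6ish = Address.fromString("0:0:0:0:0:0:0:1:2181"); // host "0:0:0:0:0:0:0:1", port 2181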

AtomixReactiveApi.java

@@ -1,18 +1,7 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.AtomixUtils.fromCf;
import static java.util.Collections.unmodifiableSet;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.CompletableFuture.failedFuture;
import com.google.common.primitives.Longs;
import io.atomix.cluster.messaging.MessagingException;
import io.atomix.cluster.messaging.Subscription;
import io.atomix.core.Atomix;
import io.atomix.core.idgenerator.AsyncAtomicIdGenerator;
import io.atomix.core.lock.AsyncAtomicLock;
import io.atomix.core.map.AsyncAtomicMap;
import io.atomix.protocols.raft.MultiRaftProtocol;
import it.tdlight.reactiveapi.CreateSessionRequest.CreateBotSessionRequest;
import it.tdlight.reactiveapi.CreateSessionRequest.CreateUserSessionRequest;
import it.tdlight.reactiveapi.CreateSessionRequest.LoadSessionFromDiskRequest;
@@ -20,23 +9,17 @@ import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import java.util.concurrent.locks.LockSupport;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
@@ -44,460 +27,227 @@ public class AtomixReactiveApi implements ReactiveApi {
private static final Logger LOG = LoggerFactory.getLogger(AtomixReactiveApi.class);
/**
* nodeId is null when DiskSessions is null
*/
@Nullable
private final String nodeId;
private final Atomix atomix;
private final KafkaProducer kafkaProducer;
private final KafkaConsumer kafkaConsumer;
private final Set<ResultingEventTransformer> resultingEventTransformerSet;
private final AsyncAtomicIdGenerator nextSessionLiveId;
private final boolean clientOnly;
private final AsyncAtomicLock sessionModificationLock;
private final AsyncAtomicMap<Long, String> userIdToNodeId;
private final KafkaTdlibClient kafkaTDLibClient;
@Nullable
private final KafkaTdlibServer kafkaTDLibServer;
private final Set<ResultingEventTransformer> resultingEventTransformerSet;
/**
* live id -> session
* user id -> session
*/
private final ConcurrentMap<Long, ReactiveApiPublisher> localLiveSessions = new ConcurrentHashMap<>();
private final ConcurrentMap<Long, ReactiveApiPublisher> localSessions = new ConcurrentHashMap<>();
/**
* DiskSessions is null when nodeId is null
*/
@Nullable
private final DiskSessionsManager diskSessions;
private volatile boolean closeRequested;
public AtomixReactiveApi(@Nullable String nodeId,
Atomix atomix,
public AtomixReactiveApi(boolean clientOnly,
KafkaParameters kafkaParameters,
@Nullable DiskSessionsManager diskSessions,
@NotNull Set<ResultingEventTransformer> resultingEventTransformerSet) {
this.nodeId = nodeId;
this.atomix = atomix;
this.kafkaProducer = new KafkaProducer(kafkaParameters);
this.kafkaConsumer = new KafkaConsumer(kafkaParameters);
this.resultingEventTransformerSet = resultingEventTransformerSet;
if (nodeId == null) {
if (diskSessions != null) {
throw new IllegalArgumentException("A client must not manage disk sessions");
}
this.clientOnly = clientOnly;
var kafkaTDLibRequestProducer = new KafkaTdlibRequestProducer(kafkaParameters);
var kafkaTDLibResponseConsumer = new KafkaTdlibResponseConsumer(kafkaParameters);
var kafkaClientBoundConsumer = new KafkaClientBoundConsumer(kafkaParameters);
this.kafkaTDLibClient = new KafkaTdlibClient(kafkaTDLibRequestProducer,
kafkaTDLibResponseConsumer,
kafkaClientBoundConsumer
);
if (clientOnly) {
this.kafkaTDLibServer = null;
} else {
if (diskSessions == null) {
throw new IllegalArgumentException("A node must be able to manage disk sessions");
}
var kafkaTDLibRequestConsumer = new KafkaTdlibRequestConsumer(kafkaParameters);
var kafkaTDLibResponseProducer = new KafkaTdlibResponseProducer(kafkaParameters);
var kafkaClientBoundProducer = new KafkaClientBoundProducer(kafkaParameters);
this.kafkaTDLibServer = new KafkaTdlibServer(kafkaTDLibRequestConsumer,
kafkaTDLibResponseProducer,
kafkaClientBoundProducer
);
}
var raftProtocol = MultiRaftProtocol.builder().build();
this.nextSessionLiveId = atomix
.atomicIdGeneratorBuilder("session-live-id")
.withProtocol(raftProtocol)
.build()
.async();
this.sessionModificationLock = atomix
.atomicLockBuilder("session-modification")
.withProtocol(raftProtocol)
.build()
.async();
this.userIdToNodeId = atomix
.<Long, String>atomicMapBuilder("user-id-to-node-id")
//.withCacheEnabled(true)
//.withCacheSize(4096)
.withNullValues(false)
.withProtocol(raftProtocol)
.build()
.async();
this.resultingEventTransformerSet = resultingEventTransformerSet;
this.diskSessions = diskSessions;
}
@Override
public Mono<Void> start() {
Mono<Set<Long>> idsSavedIntoLocalConfiguration = Mono.fromCallable(() -> {
if (diskSessions == null) {
return Set.of();
}
synchronized (diskSessions) {
return diskSessions.getSettings().userIdToSession().keySet();
}
});
Mono<Set<Long>> distributedIds;
if (this.nodeId == null) {
distributedIds = Mono.just(Set.of());
} else {
distributedIds = this
.getAllUsers()
.flatMapIterable(Map::entrySet)
.filter(entry -> entry.getValue().equals(this.nodeId))
.map(Entry::getKey)
.collect(Collectors.toUnmodifiableSet());
}
record DiskChanges(Set<Long> normalIds, Set<Long> addedIds, Set<Long> removedIds) {}
var diskChangesMono = Mono.zip(idsSavedIntoLocalConfiguration, distributedIds).map(tuple -> {
var localSet = tuple.getT1();
var remoteSet = tuple.getT2();
var deletedUsers = new HashSet<>(remoteSet);
deletedUsers.removeAll(localSet);
var addedUsers = new HashSet<>(localSet);
addedUsers.removeAll(remoteSet);
var normalUsers = new HashSet<>(localSet);
normalUsers.removeAll(addedUsers);
for (long user : addedUsers) {
LOG.warn("Detected a new user id from the disk configuration file: {}", user);
}
for (long user : normalUsers) {
LOG.info("Detected a user id from the disk configuration file: {}", user);
}
for (long user : deletedUsers) {
LOG.warn("The user id {} has been deleted from the disk configuration file", user);
}
return new DiskChanges(unmodifiableSet(normalUsers), unmodifiableSet(addedUsers), unmodifiableSet(deletedUsers));
}).cache();
var removeObsoleteDiskSessions = diskChangesMono
.flatMapIterable(diskChanges -> diskChanges.removedIds)
.concatMap(removedIds -> fromCf(() -> destroySession(removedIds, nodeId)))
.then();
var addedDiskSessionsFlux = diskChangesMono
.flatMapIterable(diskChanges -> diskChanges.addedIds)
.concatMap(this::getLocalDiskSession);
var normalDiskSessionsFlux = diskChangesMono
.flatMapIterable(diskChanges -> diskChanges.normalIds)
.concatMap(this::getLocalDiskSession);
var addNewDiskSessions = addedDiskSessionsFlux.concatMap(diskSessionAndId -> {
var id = diskSessionAndId.id;
var diskSession = diskSessionAndId.diskSession;
return createSession(new LoadSessionFromDiskRequest(id,
diskSession.token,
diskSession.phoneNumber,
true
));
}).then();
var loadExistingDiskSessions = normalDiskSessionsFlux.concatMap(diskSessionAndId -> {
var id = diskSessionAndId.id;
var diskSession = diskSessionAndId.diskSession;
return createSession(new LoadSessionFromDiskRequest(id,
diskSession.token,
diskSession.phoneNumber,
false
));
}).then();
var diskInitMono = Mono.when(removeObsoleteDiskSessions, loadExistingDiskSessions, addNewDiskSessions)
.subscribeOn(Schedulers.boundedElastic())
.doOnTerminate(() -> LOG.info("Loaded all saved sessions from disk"));
// Listen for create-session signals
Mono<Subscription> createSessionSubscriptionMono;
if (nodeId != null) {
createSessionSubscriptionMono = fromCf(() -> atomix
.getEventService()
.subscribe("create-session", CreateSessionRequest::deserializeBytes, req -> {
if (req instanceof LoadSessionFromDiskRequest) {
return failedFuture(new IllegalArgumentException("Can't pass a local request through the cluster"));
} else {
return createSession(req).toFuture();
}
}, CreateSessionResponse::serializeBytes));
} else {
createSessionSubscriptionMono = Mono.empty();
}
// Listen for revive-session signals
Mono<Subscription> reviveSessionSubscriptionMono;
if (nodeId != null) {
reviveSessionSubscriptionMono = fromCf(() -> atomix
.getEventService()
.subscribe("revive-session", (Long userId) -> this.getLocalDiskSession(userId).flatMap(sessionAndId -> {
var diskSession = sessionAndId.diskSession();
var request = new LoadSessionFromDiskRequest(userId, diskSession.token, diskSession.phoneNumber, false);
return this.createSession(request);
}).onErrorResume(ex -> Mono.empty()).then(Mono.empty()).toFuture()));
} else {
reviveSessionSubscriptionMono = Mono.empty();
}
return diskInitMono.then(Mono.when(createSessionSubscriptionMono, reviveSessionSubscriptionMono));
}
private CompletableFuture<Void> destroySession(long userId, String nodeId) {
LOG.debug("Received session delete request: user_id={}, node_id=\"{}\"", userId, nodeId);
// Lock sessions modification
return sessionModificationLock
.lock()
.thenCompose(lockVersion -> {
LOG.trace("Obtained session modification lock for session delete request: {} \"{}\"", userId, nodeId);
return userIdToNodeId
.remove(userId, nodeId)
.thenAccept(deleted -> LOG.debug("Deleted session {} \"{}\": {}", userId, nodeId, deleted));
var idsSavedIntoLocalConfiguration = Mono
.<Set<Entry<Long, DiskSession>>>fromCallable(() -> {
if (diskSessions == null) {
return Set.of();
}
synchronized (diskSessions) {
return diskSessions.getSettings().userIdToSession().entrySet();
}
})
.whenComplete((response, error) -> sessionModificationLock
.unlock()
.thenRun(() -> LOG.trace("Released session modification lock for session delete request: {} \"{}\"", userId, nodeId))
)
.whenComplete((resp, ex) -> LOG.debug("Handled session delete request {} \"{}\", the response is: {}", userId, nodeId, resp, ex));
}
.subscribeOn(Schedulers.boundedElastic())
.flatMapIterable(a -> a)
.map(a -> new DiskSessionAndId(a.getValue(), a.getKey()));
/**
* Send a request to the cluster to load that user id from disk
*/
public Mono<Void> tryReviveSession(long userId) {
return Mono.fromRunnable(() -> atomix.getEventService().broadcast("revive-session", userId));
return idsSavedIntoLocalConfiguration
.filter(diskSessionAndId -> {
try {
diskSessionAndId.diskSession().validate();
} catch (Throwable ex) {
LOG.error("Failed to load disk session {}", diskSessionAndId.id, ex);
return false;
}
return true;
})
.flatMap(diskSessionAndId -> {
var id = diskSessionAndId.id;
var diskSession = diskSessionAndId.diskSession;
return createSession(new LoadSessionFromDiskRequest(id,
diskSession.token,
diskSession.phoneNumber,
true
));
})
.then()
.doOnTerminate(() -> LOG.info("Loaded all saved sessions from disk"));
}
@Override
public Mono<CreateSessionResponse> createSession(CreateSessionRequest req) {
LOG.debug("Received create session request: {}", req);
if (nodeId == null) {
if (clientOnly) {
return Mono.error(new UnsupportedOperationException("This is a client, it can't have own sessions"));
}
Mono<CreateSessionResponse> unlockedSessionCreationMono = Mono.defer(() -> {
LOG.trace("Obtained session modification lock for session request: {}", req);
// Generate session id
return this
.nextFreeLiveId()
.flatMap(liveId -> {
// Create the session instance
ReactiveApiPublisher reactiveApiPublisher;
boolean loadedFromDisk;
long userId;
String botToken;
Long phoneNumber;
if (req instanceof CreateBotSessionRequest createBotSessionRequest) {
loadedFromDisk = false;
userId = createBotSessionRequest.userId();
botToken = createBotSessionRequest.token();
phoneNumber = null;
reactiveApiPublisher = ReactiveApiPublisher.fromToken(atomix, kafkaProducer, resultingEventTransformerSet,
liveId,
userId,
botToken
);
} else if (req instanceof CreateUserSessionRequest createUserSessionRequest) {
loadedFromDisk = false;
userId = createUserSessionRequest.userId();
botToken = null;
phoneNumber = createUserSessionRequest.phoneNumber();
reactiveApiPublisher = ReactiveApiPublisher.fromPhoneNumber(atomix, kafkaProducer, resultingEventTransformerSet,
liveId,
userId,
phoneNumber
);
} else if (req instanceof LoadSessionFromDiskRequest loadSessionFromDiskRequest) {
loadedFromDisk = true;
userId = loadSessionFromDiskRequest.userId();
botToken = loadSessionFromDiskRequest.token();
phoneNumber = loadSessionFromDiskRequest.phoneNumber();
if (loadSessionFromDiskRequest.phoneNumber() != null) {
reactiveApiPublisher = ReactiveApiPublisher.fromPhoneNumber(atomix, kafkaProducer, resultingEventTransformerSet,
liveId,
userId,
phoneNumber
);
} else {
reactiveApiPublisher = ReactiveApiPublisher.fromToken(atomix, kafkaProducer, resultingEventTransformerSet,
liveId,
userId,
botToken
);
}
} else {
return Mono.error(new UnsupportedOperationException("Unexpected value: " + req));
}
// Register the session instance to the local nodes map
var prev = localLiveSessions.put(liveId, reactiveApiPublisher);
if (prev != null) {
LOG.error("User id \"{}\" was already registered locally! {}", liveId, prev);
}
// Register the session instance to the distributed nodes map
return AtomixUtils
.fromCf(() -> userIdToNodeId.put(userId, nodeId).thenApply(Optional::ofNullable))
.flatMap(prevDistributed -> {
if (prevDistributed.isPresent() && prevDistributed.get().value() != null &&
!Objects.equals(this.nodeId, prevDistributed.get().value())) {
LOG.error("Session id \"{}\" is already registered in the node \"{}\"!", liveId,
prevDistributed.get().value());
}
var saveToDiskMono = Mono
.<Void>fromCallable(() -> {
// Save updated sessions configuration to disk
try {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
diskSessions.save();
return null;
}
} catch (IOException e) {
throw new CompletionException("Failed to save disk sessions configuration", e);
}
})
.subscribeOn(Schedulers.boundedElastic());
// Start the session instance
return Mono
.fromCallable(() -> {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
return Objects.requireNonNull(Paths.get(diskSessions.getSettings().path),
"Session " + userId + " path is missing");
}
})
.subscribeOn(Schedulers.boundedElastic())
.flatMap(baseSessionsPath -> {
String diskSessionFolderName = "id" + Long.toUnsignedString(userId);
Path sessionPath = baseSessionsPath.resolve(diskSessionFolderName);
if (!loadedFromDisk) {
// Create the disk session configuration
var diskSession = new DiskSession(botToken, phoneNumber);
return Mono.<Void>fromCallable(() -> {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
diskSessions.getSettings().userIdToSession().put(userId, diskSession);
return null;
}
}).subscribeOn(Schedulers.boundedElastic()).then(saveToDiskMono).thenReturn(sessionPath);
} else {
return Mono.just(sessionPath);
}
})
.doOnNext(path -> reactiveApiPublisher.start(path,
() -> AtomixReactiveApi.this.onPublisherClosed(userId, liveId)
))
.thenReturn(new CreateSessionResponse(liveId));
});
});
});
// Lock sessions creation
return Mono
.usingWhen(AtomixUtils.fromCf(sessionModificationLock::lock),
lockVersion -> unlockedSessionCreationMono,
lockVersion -> AtomixUtils
.fromCf(sessionModificationLock::unlock)
.doOnTerminate(() -> LOG.trace("Released session modification lock for session request: {}", req))
)
.doOnNext(resp -> LOG.debug("Handled session request {}, the response is: {}", req, resp))
.doOnError(ex -> LOG.debug("Handled session request {}, the response is: error", req, ex));
}
private void onPublisherClosed(long userId, Long liveId) {
this.destroySession(userId, nodeId).whenComplete((result, ex) -> {
localLiveSessions.remove(liveId);
if (ex != null) {
LOG.error("Failed to close the session for user {} after it was closed itself", userId);
// Create the session instance
ReactiveApiPublisher reactiveApiPublisher;
boolean loadedFromDisk;
long userId;
String botToken;
Long phoneNumber;
if (req instanceof CreateBotSessionRequest createBotSessionRequest) {
loadedFromDisk = false;
userId = createBotSessionRequest.userId();
botToken = createBotSessionRequest.token();
phoneNumber = null;
reactiveApiPublisher = ReactiveApiPublisher.fromToken(kafkaTDLibServer, resultingEventTransformerSet,
userId,
botToken
);
} else if (req instanceof CreateUserSessionRequest createUserSessionRequest) {
loadedFromDisk = false;
userId = createUserSessionRequest.userId();
botToken = null;
phoneNumber = createUserSessionRequest.phoneNumber();
reactiveApiPublisher = ReactiveApiPublisher.fromPhoneNumber(kafkaTDLibServer, resultingEventTransformerSet,
userId,
phoneNumber
);
} else if (req instanceof LoadSessionFromDiskRequest loadSessionFromDiskRequest) {
loadedFromDisk = true;
userId = loadSessionFromDiskRequest.userId();
botToken = loadSessionFromDiskRequest.token();
phoneNumber = loadSessionFromDiskRequest.phoneNumber();
if (loadSessionFromDiskRequest.phoneNumber() != null) {
reactiveApiPublisher = ReactiveApiPublisher.fromPhoneNumber(kafkaTDLibServer,
resultingEventTransformerSet,
userId,
phoneNumber
);
} else {
LOG.debug("Closed the session for user {} after it was closed itself", userId);
reactiveApiPublisher = ReactiveApiPublisher.fromToken(kafkaTDLibServer,
resultingEventTransformerSet,
userId,
botToken
);
}
});
}
private Mono<Long> nextFreeLiveId() {
return fromCf(nextSessionLiveId::nextId);
}
public Atomix getAtomix() {
return atomix;
}
/**
* Get the list of current sessions
* @return map of user id -> node id
*/
@Override
public Mono<Map<Long, String>> getAllUsers() {
return Flux.defer(() -> {
var it = userIdToNodeId.entrySet().iterator();
var hasNextMono = fromCf(it::hasNext);
var strictNextMono = fromCf(it::next)
.map(elem -> Map.entry(elem.getKey(), elem.getValue().value()));
var nextOrNothingMono = hasNextMono.flatMap(hasNext -> {
if (hasNext) {
return strictNextMono;
} else {
return Mono.empty();
}
});
return nextOrNothingMono.repeatWhen(s -> s.takeWhile(n -> n > 0));
}).collectMap(Entry::getKey, Entry::getValue);
}
@Override
public Set<UserIdAndLiveId> getLocalLiveSessionIds() {
return localLiveSessions
.values()
.stream()
.map(reactiveApiPublisher -> new UserIdAndLiveId(reactiveApiPublisher.userId, reactiveApiPublisher.liveId))
.collect(Collectors.toUnmodifiableSet());
}
@Override
public boolean is(String nodeId) {
if (this.nodeId == null) {
return nodeId == null;
} else {
return Mono.error(new UnsupportedOperationException("Unexpected value: " + req));
}
return this.nodeId.equals(nodeId);
}
@Override
public Mono<Long> resolveUserLiveId(long userId) {
return AtomixUtils
.fromCf(() -> atomix
.getEventService()
.send(SubjectNaming.getDynamicIdResolveSubject(userId),
userId,
Longs::toByteArray,
Longs::fromByteArray,
Duration.ofSeconds(1)
))
.onErrorResume(ex -> {
if (ex instanceof MessagingException.NoRemoteHandler || ex instanceof CancellationException) {
return Mono.empty();
} else {
return Mono.error(ex);
// Register the session instance to the local nodes map
var prev = localSessions.put(userId, reactiveApiPublisher);
if (prev != null) {
LOG.error("User id \"{}\" was already registered locally! {}", userId, prev);
}
var saveToDiskMono = Mono
.<Void>fromCallable(() -> {
// Save updated sessions configuration to disk
try {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
diskSessions.save();
return null;
}
} catch (IOException e) {
throw new CompletionException("Failed to save disk sessions configuration", e);
}
});
})
.subscribeOn(Schedulers.boundedElastic());
// Start the session instance
return Mono
.fromCallable(() -> {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
return Objects.requireNonNull(Paths.get(diskSessions.getSettings().path),
"Session " + userId + " path is missing");
}
})
.subscribeOn(Schedulers.boundedElastic())
.flatMap(baseSessionsPath -> {
String diskSessionFolderName = "id" + Long.toUnsignedString(userId);
Path sessionPath = baseSessionsPath.resolve(diskSessionFolderName);
if (!loadedFromDisk) {
// Create the disk session configuration
var diskSession = new DiskSession(botToken, phoneNumber);
return Mono.<Void>fromCallable(() -> {
Objects.requireNonNull(diskSessions);
synchronized (diskSessions) {
diskSessions.getSettings().userIdToSession().put(userId, diskSession);
return null;
}
}).subscribeOn(Schedulers.boundedElastic()).then(saveToDiskMono).thenReturn(sessionPath);
} else {
return Mono.just(sessionPath);
}
})
.doOnNext(path -> reactiveApiPublisher.start(path, () -> {
localSessions.remove(userId);
LOG.debug("Closed the session for user {} after it was closed itself", userId);
}))
.thenReturn(new CreateSessionResponse(userId));
}
@Override
public ReactiveApiClient dynamicClient(String subGroupId, long userId) {
return new DynamicAtomixReactiveApiClient(this, kafkaConsumer, userId, subGroupId);
}
@Override
public ReactiveApiClient liveClient(String subGroupId, long liveId, long userId) {
return new LiveAtomixReactiveApiClient(atomix, kafkaConsumer, liveId, userId, subGroupId);
}
@Override
public ReactiveApiMultiClient multiClient(String subGroupId) {
return new AtomixReactiveApiMultiClient(this, kafkaConsumer, subGroupId);
public ReactiveApiClient client(String subGroupId, long userId) {
return new LiveAtomixReactiveApiClient(kafkaTDLibClient, userId, subGroupId);
}
@Override
public Mono<Void> close() {
var atomixStopper = fromCf(this.atomix::stop).timeout(Duration.ofSeconds(8), Mono.empty());
var kafkaStopper = Mono.fromRunnable(kafkaProducer::close).subscribeOn(Schedulers.boundedElastic());
return Mono.when(atomixStopper, kafkaStopper);
closeRequested = true;
Mono<?> kafkaServerProducersStopper;
if (kafkaTDLibServer != null) {
kafkaServerProducersStopper = Mono.fromRunnable(kafkaTDLibServer::close).subscribeOn(Schedulers.boundedElastic());
} else {
kafkaServerProducersStopper = Mono.empty();
}
Mono<?> kafkaClientProducersStopper = Mono
.fromRunnable(kafkaTDLibClient::close)
.subscribeOn(Schedulers.boundedElastic());
return Mono.when(kafkaServerProducersStopper, kafkaClientProducersStopper);
}
@Override
public void waitForExit() {
var nanos = Duration.ofSeconds(1).toNanos();
while (!closeRequested && !Thread.interrupted()) {
LockSupport.parkNanos(nanos);
}
}
private record DiskSessionAndId(DiskSession diskSession, long id) {}
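For orientation, a hedged usage sketch of the reworked API above (argument values are hypothetical; kafkaParameters would come from the cluster settings, as Entrypoint does below):

  // Client-only instance: no disk sessions, no event transformers
  var api = new AtomixReactiveApi(true, kafkaParameters, null, Set.of());
  api.start().block();
  ReactiveApiClient client = api.client("cli-group-1", 123456789L); // subGroupId, userId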

AtomixReactiveApiMultiClient.java

@@ -1,76 +0,0 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.AtomixUtils.fromCf;
import io.atomix.cluster.messaging.ClusterEventService;
import io.atomix.cluster.messaging.MessagingException;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import java.net.ConnectException;
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.TimeoutException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
public class AtomixReactiveApiMultiClient implements ReactiveApiMultiClient, AutoCloseable {
private final ClusterEventService eventService;
private final KafkaConsumer kafkaConsumer;
private final String subGroupId;
private volatile boolean closed = false;
AtomixReactiveApiMultiClient(AtomixReactiveApi api, KafkaConsumer kafkaConsumer, String subGroupId) {
this.eventService = api.getAtomix().getEventService();
this.kafkaConsumer = kafkaConsumer;
this.subGroupId = subGroupId;
}
@Override
public Flux<ClientBoundEvent> clientBoundEvents(boolean ack) {
if (closed) {
return Flux.empty();
}
return kafkaConsumer.consumeMessages(subGroupId).map(TimestampedClientBoundEvent::event).takeUntil(s -> closed);
}
@Override
public <T extends TdApi.Object> Mono<T> request(long userId, long liveId, TdApi.Function<T> request, Instant timeout) {
return fromCf(() -> {
if (closed) {
return CompletableFuture.failedFuture(new TdError(500, "Session is closed"));
}
return eventService.send("session-" + liveId + "-requests",
new Request<>(liveId, request, timeout),
LiveAtomixReactiveApiClient::serializeRequest,
LiveAtomixReactiveApiClient::deserializeResponse,
Duration.between(Instant.now(), timeout)
);
}).subscribeOn(Schedulers.boundedElastic()).<T>handle((item, sink) -> {
if (item instanceof TdApi.Error error) {
sink.error(new TdError(error.code, error.message));
} else {
//noinspection unchecked
sink.next((T) item);
}
}).onErrorMap(ex -> {
if (ex instanceof MessagingException.NoRemoteHandler) {
return new TdError(404, "Bot #IDU" + userId + " (live id: " + liveId + ") is not found on the cluster");
} else if (ex instanceof TimeoutException || ex instanceof ConnectException) {
return new TdError(408, "Request Timeout", ex);
} else {
return ex;
}
});
}
@Override
public void close() {
closed = true;
}
}

AtomixUtils.java

@@ -1,29 +0,0 @@
package it.tdlight.reactiveapi;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Supplier;
import reactor.core.publisher.Mono;
public class AtomixUtils {
public static <T> Mono<T> fromCf(Supplier<? extends CompletableFuture<? extends T>> completableFutureSupplier) {
return Mono.create(sink -> {
var cf = completableFutureSupplier.get();
cf.whenComplete((result, ex) -> {
if (ex != null) {
if (ex instanceof CompletionException) {
sink.error(ex.getCause());
} else {
sink.error(ex);
}
} else if (result != null) {
sink.success(result);
} else {
sink.success();
}
});
sink.onCancel(() -> cf.cancel(true));
});
}
}
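The deleted fromCf helper lazily created a CompletableFuture, unwrapped CompletionException, and cancelled the future on downstream cancellation. Reactor's built-in adapter covers most of this; a minimal sketch (error-unwrapping details may differ slightly):

  import java.util.concurrent.CompletableFuture;
  import reactor.core.publisher.Mono;

  // Mono.fromFuture with a Supplier defers creation to subscription time and
  // cancels the future if the subscriber cancels, like sink.onCancel above.
  Mono<String> viaReactor = Mono.fromFuture(() -> CompletableFuture.supplyAsync(() -> "ok"));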

BaseAtomixReactiveApiClient.java

@@ -2,17 +2,19 @@ package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import io.atomix.cluster.messaging.ClusterEventService;
import io.atomix.cluster.messaging.MessagingException;
import io.atomix.core.Atomix;
import it.tdlight.common.utils.LibraryVersion;
import it.tdlight.jni.TdApi;
import it.tdlight.jni.TdApi.Error;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import it.tdlight.reactiveapi.Event.Ignored;
import it.tdlight.reactiveapi.Event.OnBotLoginCodeRequested;
import it.tdlight.reactiveapi.Event.OnOtherDeviceLoginRequested;
import it.tdlight.reactiveapi.Event.OnPasswordRequested;
import it.tdlight.reactiveapi.Event.OnRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import it.tdlight.reactiveapi.Event.OnResponse;
import it.tdlight.reactiveapi.Event.OnResponse.InvalidResponse;
import it.tdlight.reactiveapi.Event.OnResponse.Response;
import it.tdlight.reactiveapi.Event.OnUpdateData;
import it.tdlight.reactiveapi.Event.OnUpdateError;
import it.tdlight.reactiveapi.Event.OnUserLoginCodeRequested;
@@ -28,10 +30,14 @@ import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.SerializationException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.common.errors.SerializationException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
@@ -39,79 +45,88 @@ import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoSink;
import reactor.core.publisher.Sinks;
import reactor.core.publisher.Sinks.EmitFailureHandler;
import reactor.core.publisher.Sinks.Many;
import reactor.core.scheduler.Schedulers;
import reactor.util.concurrent.Queues;
abstract class BaseAtomixReactiveApiClient implements ReactiveApiClient, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(BaseAtomixReactiveApiClient.class);
protected final ClusterEventService eventService;
private static final Duration TEN_MS = Duration.ofMillis(10);
protected final long userId;
private Disposable liveIdChangeSubscription;
private Flux<Long> liveIdChange;
private Mono<Long> emptyIdErrorMono;
// Temporary id used to make requests
private final long clientId;
private final Many<OnRequest<?>> requests
= Sinks.many().unicast().onBackpressureBuffer(Queues.<Event.OnRequest<?>>small().get());
private final Map<Long, CompletableFuture<Timestamped<OnResponse<TdApi.Object>>>> responses
= new ConcurrentHashMap<>();
private final AtomicLong requestId = new AtomicLong(0);
public BaseAtomixReactiveApiClient(Atomix atomix, long userId) {
this.eventService = atomix.getEventService();
public BaseAtomixReactiveApiClient(KafkaTdlibClient kafkaTdlibClient, long userId) {
this.userId = userId;
}
this.clientId = System.nanoTime();
kafkaTdlibClient.request().sendMessages(userId, requests.asFlux())
.subscribeOn(Schedulers.boundedElastic())
.subscribe(v -> {}, ex -> LOG.error("Failed to send requests", ex));
protected void initialize() {
this.liveIdChange = liveIdChange().cache(1);
this.liveIdChangeSubscription = liveIdChange
kafkaTdlibClient.response()
.consumeMessages("td-responses", userId)
.filter(response -> response.data().clientId() == clientId)
.doOnNext(response -> {
var responseSink = responses.get(response.data().requestId());
if (responseSink == null) {
LOG.debug("Bot #IDU{} received a response for an unknown request id: {}",
userId, response.data().requestId());
return;
}
responseSink.complete(response);
})
.subscribeOn(Schedulers.parallel())
.subscribe(v -> LOG.debug("Live id of user {} changed: {}", userId, v),
ex -> LOG.error("Failed to retrieve live id of user {}", userId)
);
this.emptyIdErrorMono = Mono.error(() -> new TdError(404, "Bot #IDU" + this.userId
+ " is not found on the cluster, no live id has been associated with it locally"));
.subscribe();
}
@Override
public final <T extends TdApi.Object> Mono<T> request(TdApi.Function<T> request, Instant timeout) {
return liveIdChange
.take(1, true)
.singleOrEmpty()
.switchIfEmpty(emptyIdErrorMono)
.flatMap(liveId -> AtomixUtils
.fromCf(() -> eventService.send("session-" + liveId + "-requests",
new Request<>(liveId, request, timeout),
LiveAtomixReactiveApiClient::serializeRequest,
LiveAtomixReactiveApiClient::deserializeResponse,
Duration.between(Instant.now(), timeout)
))
.subscribeOn(Schedulers.boundedElastic())
.onErrorMap(ex -> {
if (ex instanceof MessagingException.NoRemoteHandler) {
return new TdError(404, "Bot #IDU" + this.userId + " (liveId: " + liveId + ") is not found on the cluster");
} else if (ex instanceof TimeoutException || ex instanceof ConnectException) {
return new TdError(408, "Request Timeout", ex);
return Mono.defer(() -> {
var requestId = this.requestId.getAndIncrement();
var timeoutError = new TdError(408, "Request Timeout");
Mono<T> timeoutErrorMono = Mono.error(timeoutError);
var timeoutDuration = Duration.between(Instant.now(), timeout);
if (timeoutDuration.isNegative() || timeoutDuration.isZero()) {
return timeoutErrorMono;
}
var cf = new CompletableFuture<Timestamped<OnResponse<TdApi.Object>>>();
this.responses.put(requestId, cf);
Mono<T> response = Mono.fromFuture(cf)
.<T>handle((responseObj, sink) -> {
if (Instant.ofEpochMilli(responseObj.timestamp()).compareTo(timeout) > 0) {
sink.error(new TdError(408, "Request Timeout"));
} else if (responseObj.data() instanceof OnResponse.InvalidResponse<?>) {
sink.error(new TdError(400, "Conflicting protocol version"));
} else if (responseObj.data() instanceof OnResponse.Response<?> onResponse) {
if (onResponse.response().getConstructor() == Error.CONSTRUCTOR) {
var tdError = (TdApi.Error) onResponse.response();
sink.error(new TdError(tdError.code, tdError.message));
} else {
return ex;
//noinspection unchecked
var tdResponse = (T) onResponse.response();
sink.next(tdResponse);
}
})
)
.<T>handle((item, sink) -> {
if (item instanceof TdApi.Error error) {
sink.error(new TdError(error.code, error.message));
} else {
//noinspection unchecked
sink.next((T) item);
}
})
.onErrorMap(ex -> {
if (ex instanceof MessagingException.NoRemoteHandler) {
return new TdError(404, "Bot #IDU" + this.userId + " is not found on the cluster");
} else if (ex instanceof TimeoutException || ex instanceof ConnectException) {
return new TdError(408, "Request Timeout", ex);
} else {
return ex;
}
});
} else {
sink.error(new UnsupportedOperationException("Unknown response type: " + responseObj.data().getClass()));
}
})
.timeout(timeoutDuration, timeoutErrorMono.doFirst(() -> this.responses.remove(requestId)));
requests.emitNext(new Request<>(clientId, requestId, request, timeout), EmitFailureHandler.busyLooping(TEN_MS));
return response;
});
}
protected abstract Flux<Long> liveIdChange();
@Override
public final long getUserId() {
return userId;
@@ -123,37 +138,6 @@ abstract class BaseAtomixReactiveApiClient implements ReactiveApiClient, AutoCloseable {
}
static TdApi.Object deserializeResponse(byte[] bytes) {
try {
if (bytes == null || bytes.length == 0) {
return null;
}
var dis = new DataInputStream(new ByteArrayInputStream(bytes));
var serialVersion = dis.readInt();
if (serialVersion != SERIAL_VERSION) {
return new TdApi.Error(400, "Conflicting protocol version");
}
return TdApi.Deserializer.deserialize(dis);
} catch (IOException ex) {
throw new SerializationException(ex);
}
}
static byte[] serializeRequest(Request<?> request) {
try (var byteArrayOutputStream = new ByteArrayOutputStream()) {
try (var dataOutputStream = new DataOutputStream(byteArrayOutputStream)) {
dataOutputStream.writeLong(request.liveId());
dataOutputStream.writeInt(SERIAL_VERSION);
dataOutputStream.writeLong(request.timeout().toEpochMilli());
request.request().serialize(dataOutputStream);
dataOutputStream.flush();
return byteArrayOutputStream.toByteArray();
}
} catch (UnsupportedOperationException | IOException ex) {
throw new SerializationException(ex);
}
}
static ClientBoundEvent deserializeEvent(byte[] bytes) {
try (var byteArrayInputStream = new ByteArrayInputStream(bytes)) {
try (var is = new DataInputStream(byteArrayInputStream)) {
@@ -164,43 +148,24 @@ abstract class BaseAtomixReactiveApiClient implements ReactiveApiClient, AutoCloseable {
}
}
static List<ClientBoundEvent> deserializeEvents(byte[] bytes) {
try (var byteArrayInputStream = new ByteArrayInputStream(bytes)) {
try (var is = new DataInputStream(byteArrayInputStream)) {
var len = is.readInt();
var result = new ArrayList<ClientBoundEvent>(len);
for (int i = 0; i < len; i++) {
result.add(deserializeEvent(is));
}
return result;
}
} catch (IOException ex) {
throw new SerializationException(ex);
}
}
static @NotNull ClientBoundEvent deserializeEvent(DataInputStream is) throws IOException {
var liveId = is.readLong();
var userId = is.readLong();
var dataVersion = is.readInt();
if (dataVersion != SERIAL_VERSION) {
return new Ignored(liveId, userId);
return new Ignored(userId);
}
return switch (is.readByte()) {
case 0x01 -> new OnUpdateData(liveId, userId, (TdApi.Update) TdApi.Deserializer.deserialize(is));
case 0x02 -> new OnUpdateError(liveId, userId, (TdApi.Error) TdApi.Deserializer.deserialize(is));
case 0x03 -> new OnUserLoginCodeRequested(liveId, userId, is.readLong());
case 0x04 -> new OnBotLoginCodeRequested(liveId, userId, is.readUTF());
case 0x05 -> new OnOtherDeviceLoginRequested(liveId, userId, is.readUTF());
case 0x06 -> new OnPasswordRequested(liveId, userId, is.readUTF(), is.readBoolean(), is.readUTF());
case 0x01 -> new OnUpdateData(userId, (TdApi.Update) TdApi.Deserializer.deserialize(is));
case 0x02 -> new OnUpdateError(userId, (TdApi.Error) TdApi.Deserializer.deserialize(is));
case 0x03 -> new OnUserLoginCodeRequested(userId, is.readLong());
case 0x04 -> new OnBotLoginCodeRequested(userId, is.readUTF());
case 0x05 -> new OnOtherDeviceLoginRequested(userId, is.readUTF());
case 0x06 -> new OnPasswordRequested(userId, is.readUTF(), is.readBoolean(), is.readUTF());
default -> throw new IllegalStateException("Unexpected value: " + is.readByte());
};
}
@Override
public void close() {
if (liveIdChangeSubscription != null) {
liveIdChangeSubscription.dispose();
}
}
}
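The rewritten client replaces Atomix request/reply messaging with explicit correlation over Kafka topics: every request carries a (clientId, requestId) pair, and a map of pending futures matches responses back to callers. A self-contained sketch of that pattern, not the project's actual classes:

  import java.util.Map;
  import java.util.concurrent.CompletableFuture;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.atomic.AtomicLong;
  import java.util.function.BiConsumer;

  final class Correlator<Req, Res> {
    private final long clientId = System.nanoTime();          // distinguishes this client instance
    private final AtomicLong nextRequestId = new AtomicLong();
    private final Map<Long, CompletableFuture<Res>> pending = new ConcurrentHashMap<>();

    // Assign a fresh id, remember the pending future, hand the message to the transport
    CompletableFuture<Res> send(Req request, BiConsumer<Long, Req> publishToRequestTopic) {
      long requestId = nextRequestId.getAndIncrement();
      var cf = new CompletableFuture<Res>();
      pending.put(requestId, cf);
      publishToRequestTopic.accept(requestId, request);
      return cf;
    }

    // Called for every message on the response topic; other clients' responses are skipped
    void onResponse(long forClientId, long requestId, Res response) {
      if (forClientId != clientId) {
        return;
      }
      var cf = pending.remove(requestId);
      if (cf != null) {
        cf.complete(response);
      }
    }
  }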

Cli.java

@@ -2,7 +2,6 @@ package it.tdlight.reactiveapi;
import static java.util.Collections.unmodifiableSet;
import io.atomix.core.Atomix;
import it.tdlight.reactiveapi.CreateSessionRequest.CreateBotSessionRequest;
import it.tdlight.reactiveapi.CreateSessionRequest.CreateUserSessionRequest;
import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
@@ -16,6 +15,7 @@ import org.jline.reader.LineReader;
import org.jline.reader.LineReaderBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.scheduler.Schedulers;
public class Cli {
@@ -27,8 +27,7 @@ public class Cli {
public static void main(String[] args) throws IOException {
var validArgs = Entrypoint.parseArguments(args);
var atomixBuilder = Atomix.builder();
var api = (AtomixReactiveApi) Entrypoint.start(validArgs, atomixBuilder);
var api = (AtomixReactiveApi) Entrypoint.start(validArgs);
AtomicBoolean alreadyShutDown = new AtomicBoolean(false);
AtomicBoolean acceptInputs = new AtomicBoolean(true);
@@ -36,7 +35,7 @@
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
acceptInputs.set(false);
if (alreadyShutDown.compareAndSet(false, true)) {
api.getAtomix().stop().join();
api.close().subscribeOn(Schedulers.immediate()).subscribe();
}
}));
@@ -92,52 +91,19 @@
}
private void printSessions(ReactiveApi api, boolean onlyLocal) {
api.getAllUsers().subscribe(sessions -> {
var userIdToLiveId = api
.getLocalLiveSessionIds()
.stream()
.collect(Collectors.toMap(UserIdAndLiveId::userId, k -> Set.of(k.liveId()), (a, b) -> {
var r = new LongOpenHashSet(a.size() + b.size());
r.addAll(a);
r.addAll(b);
return unmodifiableSet(r);
}));
StringBuilder sb = new StringBuilder();
sb.append("Sessions:\n");
for (var userEntry : sessions.entrySet()) {
var userId = userEntry.getKey();
var nodeId = userEntry.getValue();
if (!onlyLocal || api.is(nodeId)) {
sb.append(" - session #IDU").append(userId);
if (!onlyLocal) {
sb.append(": ").append(nodeId);
} else {
sb
.append(": liveId=")
.append(userIdToLiveId
.get(userId)
.stream()
.map(Object::toString)
.collect(Collectors.joining(", ", "(", ")")));
}
sb.append("\n");
}
}
LOG.info(sb.toString());
});
LOG.info("Not implemented");
}
@Override
protected void shutdown() {
acceptInputs.set(false);
if (alreadyShutDown.compareAndSet(false, true)) {
api.getAtomix().stop().join();
System.exit(0);
Runtime.getRuntime().exit(0);
}
}
};
console.start();
api.waitForExit();
}
private static void createSession(ReactiveApi api, String commandArgs) {

ClusterSettings.java

@@ -11,14 +11,11 @@ public class ClusterSettings {
public String id;
public List<String> kafkaBootstrapServers;
public List<NodeSettings> nodes;
@JsonCreator
public ClusterSettings(@JsonProperty(required = true, value = "id") String id,
@JsonProperty(required = true, value = "kafkaBootstrapServers") List<String> kafkaBootstrapServers,
@JsonProperty(required = true, value = "nodes") List<NodeSettings> nodes) {
@JsonProperty(required = true, value = "kafkaBootstrapServers") List<String> kafkaBootstrapServers) {
this.id = id;
this.kafkaBootstrapServers = kafkaBootstrapServers;
this.nodes = nodes;
}
}
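With per-node addresses gone, the cluster configuration reduces to an id plus the Kafka bootstrap servers. A hypothetical cluster.yaml matching the Jackson properties above (values are placeholders):

  id: "mycluster"
  kafkaBootstrapServers:
    - "kafka-1:9092"
    - "kafka-2:9092"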

CreateSessionRequest.java

@@ -8,50 +8,18 @@ import it.tdlight.reactiveapi.CreateSessionRequest.LoadSessionFromDiskRequest;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.commons.lang3.SerializationException;
import org.apache.kafka.common.errors.SerializationException;
public sealed interface CreateSessionRequest permits CreateUserSessionRequest, CreateBotSessionRequest,
LoadSessionFromDiskRequest {
long userId();
static CreateSessionRequest deserializeBytes(byte[] bytes) {
byte type = bytes[0];
long userId = Longs.fromBytes(bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8]);
return switch (type) {
case 0 -> new CreateUserSessionRequest(userId,
Longs.fromBytes(bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14], bytes[15], bytes[16])
);
case 1 -> new CreateBotSessionRequest(userId,
new String(bytes, 1 + Long.BYTES + Integer.BYTES, Ints.fromBytes(bytes[9], bytes[10], bytes[11], bytes[12]))
);
case 2 -> {
var dis = new DataInputStream(new ByteArrayInputStream(bytes, 1 + Long.BYTES, bytes.length - (1 + Long.BYTES)));
try {
var isBot = dis.readBoolean();
String token;
Long phoneNumber;
if (isBot) {
token = dis.readUTF();
phoneNumber = null;
} else {
token = null;
phoneNumber = dis.readLong();
}
yield new LoadSessionFromDiskRequest(userId, token, phoneNumber, dis.readBoolean());
} catch (IOException e) {
throw new SerializationException(e);
}
}
default -> throw new IllegalStateException("Unexpected value: " + type);
};
}
record CreateUserSessionRequest(long userId, long phoneNumber) implements CreateSessionRequest {}
final record CreateUserSessionRequest(long userId, long phoneNumber) implements CreateSessionRequest {}
record CreateBotSessionRequest(long userId, String token) implements CreateSessionRequest {}
final record CreateBotSessionRequest(long userId, String token) implements CreateSessionRequest {}
final record LoadSessionFromDiskRequest(long userId, String token, Long phoneNumber, boolean createNew) implements
record LoadSessionFromDiskRequest(long userId, String token, Long phoneNumber, boolean createNew) implements
CreateSessionRequest {
public LoadSessionFromDiskRequest {

CreateSessionResponse.java

@@ -5,7 +5,4 @@ import com.google.common.primitives.Longs;
public record CreateSessionResponse(long sessionId) {
public static byte[] serializeBytes(CreateSessionResponse createSessionResponse) {
return Longs.toByteArray(createSessionResponse.sessionId);
}
}

DynamicAtomixReactiveApiClient.java

@@ -1,115 +0,0 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import java.time.Duration;
import java.util.concurrent.CancellationException;
import java.util.concurrent.atomic.AtomicReference;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class DynamicAtomixReactiveApiClient extends BaseAtomixReactiveApiClient implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DynamicAtomixReactiveApiClient.class);
private record CurrentLiveId(long sinceTimestamp, long liveId) implements Comparable<CurrentLiveId> {
@Override
public int compareTo(@NotNull DynamicAtomixReactiveApiClient.CurrentLiveId o) {
return Long.compare(this.sinceTimestamp, o.sinceTimestamp);
}
}
private final ReactiveApi api;
private final AtomicReference<Disposable> clientBoundEventsSubscription = new AtomicReference<>(null);
private final long userId;
private final Flux<TimestampedClientBoundEvent> clientBoundEvents;
private final Flux<CurrentLiveId> liveIdChange;
private volatile boolean closed;
DynamicAtomixReactiveApiClient(AtomixReactiveApi api, KafkaConsumer kafkaConsumer, long userId, String subGroupId) {
super(api.getAtomix(), userId);
this.api = api;
this.userId = userId;
var clientBoundEvents = kafkaConsumer
.consumeMessages(subGroupId, userId)
.takeWhile(n -> !closed)
.publish()
.autoConnect(3, clientBoundEventsSubscription::set)
.onErrorResume(CancellationException.class, ex -> {
if ("Disconnected".equals(ex.getMessage())) {
LOG.debug("Disconnected client {}", userId, ex);
return Mono.empty();
} else {
return Mono.error(ex);
}
});
var firstLiveId = clientBoundEvents
.take(1, true)
.singleOrEmpty()
.map(e -> new CurrentLiveId(e.timestamp(), e.event().liveId()));
var sampledLiveIds = clientBoundEvents
.skip(1)
.sample(Duration.ofSeconds(1))
.map(e -> new CurrentLiveId(e.timestamp(), e.event().liveId()));
var startupLiveId = api
.resolveUserLiveId(userId)
.doOnError(ex -> LOG.error("Failed to resolve live id of user {}", userId, ex))
.onErrorResume(ex -> Mono.empty())
.map(liveId -> new CurrentLiveId(System.currentTimeMillis(), liveId));
liveIdChange = startupLiveId
.concatWith(Flux.merge(firstLiveId, sampledLiveIds))
.scan((prev, next) -> {
if (next.compareTo(prev) > 0) {
LOG.trace("Replaced id {} with id {}", prev, next);
return next;
} else {
return prev;
}
})
.distinctUntilChanged(CurrentLiveId::liveId);
// minimum 3 subscribers:
// - firstClientBoundEvent
// - sampledClientBoundEvents
// - clientBoundEvents
this.clientBoundEvents = clientBoundEvents;
super.initialize();
}
@Override
public Flux<ClientBoundEvent> clientBoundEvents() {
return clientBoundEvents.doFirst(() -> {
if (this.clientBoundEventsSubscription.get() != null) {
throw new UnsupportedOperationException("Already subscribed");
}
}).map(TimestampedClientBoundEvent::event);
}
@Override
protected Flux<Long> liveIdChange() {
return liveIdChange.map(CurrentLiveId::liveId);
}
public void close() {
this.closed = true;
var clientBoundEventsSubscription = this.clientBoundEventsSubscription.get();
if (clientBoundEventsSubscription != null && !clientBoundEventsSubscription.isDisposed()) {
try {
clientBoundEventsSubscription.dispose();
} catch (CancellationException ignored) {
LOG.debug("Reactive api client for user {} has been cancelled", userId);
}
}
super.close();
}
}

Entrypoint.java

@@ -4,13 +4,6 @@ import static java.util.Collections.unmodifiableSet;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import io.atomix.cluster.Node;
import io.atomix.cluster.discovery.BootstrapDiscoveryProvider;
import io.atomix.core.Atomix;
import io.atomix.core.AtomixBuilder;
import io.atomix.core.profile.ConsensusProfileConfig;
import io.atomix.core.profile.Profile;
import io.atomix.protocols.raft.partition.RaftPartitionGroup;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.nio.file.Files;
@@ -45,7 +38,7 @@ public class Entrypoint {
return new ValidEntrypointArgs(args[0], args[1], args[2]);
}
public static ReactiveApi start(ValidEntrypointArgs args, AtomixBuilder atomixBuilder) throws IOException {
public static ReactiveApi start(ValidEntrypointArgs args) throws IOException {
// Read settings
ClusterSettings clusterSettings;
InstanceSettings instanceSettings;
@@ -65,19 +58,15 @@
}
}
return start(clusterSettings, instanceSettings, diskSessions, atomixBuilder);
return start(clusterSettings, instanceSettings, diskSessions);
}
public static ReactiveApi start(ClusterSettings clusterSettings,
InstanceSettings instanceSettings,
@Nullable DiskSessionsManager diskSessions,
AtomixBuilder atomixBuilder) {
atomixBuilder.withCompatibleSerialization(false);
atomixBuilder.withClusterId(clusterSettings.id);
@Nullable DiskSessionsManager diskSessions) {
Set<ResultingEventTransformer> resultingEventTransformerSet;
String nodeId;
boolean clientOnly = false;
if (instanceSettings.client) {
if (diskSessions != null) {
throw new IllegalArgumentException("A client instance can't have a session manager!");
@@ -85,31 +74,12 @@
if (instanceSettings.clientAddress == null) {
throw new IllegalArgumentException("A client instance must have an address (host:port)");
}
var randomizedClientId = instanceSettings.id + "-" + ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE);
var address = Address.fromString(instanceSettings.clientAddress);
atomixBuilder.withMemberId(randomizedClientId).withHost(address.host()).withPort(address.port());
nodeId = null;
clientOnly = true;
resultingEventTransformerSet = Set.of();
} else {
if (diskSessions == null) {
throw new IllegalArgumentException("A full instance must have a session manager!");
}
// Find node settings
var nodeSettingsOptional = clusterSettings.nodes
.stream()
.filter(node -> node.id.equals(instanceSettings.id))
.findAny();
// Check node settings presence
if (nodeSettingsOptional.isEmpty()) {
System.err.printf("Node id \"%s\" has not been described in cluster.yaml nodes list%n", instanceSettings.id);
System.exit(2);
}
var nodeSettings = nodeSettingsOptional.get();
var address = Address.fromString(nodeSettings.address);
atomixBuilder.withMemberId(instanceSettings.id).withHost(address.host()).withPort(address.port());
resultingEventTransformerSet = new HashSet<>();
if (instanceSettings.resultingEventTransformers != null) {
@@ -128,59 +98,12 @@
}
}
nodeId = nodeSettings.id;
resultingEventTransformerSet = unmodifiableSet(resultingEventTransformerSet);
}
var bootstrapDiscoveryProviderNodes = new ArrayList<Node>();
List<String> systemPartitionGroupMembers = new ArrayList<>();
for (NodeSettings node : clusterSettings.nodes) {
var address = Address.fromString(node.address);
bootstrapDiscoveryProviderNodes.add(Node
.builder()
.withId(node.id)
.withHost(address.host())
.withPort(address.port())
.build());
systemPartitionGroupMembers.add(node.id);
}
var bootstrapDiscoveryProviderBuilder = BootstrapDiscoveryProvider.builder();
bootstrapDiscoveryProviderBuilder.withNodes(bootstrapDiscoveryProviderNodes).build();
atomixBuilder.withMembershipProvider(bootstrapDiscoveryProviderBuilder.build());
atomixBuilder.withManagementGroup(RaftPartitionGroup
.builder("system")
.withNumPartitions(1)
.withMembers(systemPartitionGroupMembers)
.withDataDirectory(Paths.get(".data-" + instanceSettings.id).toFile())
.build());
atomixBuilder.withShutdownHook(false);
atomixBuilder.withTypeRegistrationRequired();
if (instanceSettings.client) {
atomixBuilder.addProfile(Profile.client());
} else {
var prof = Profile.consensus(systemPartitionGroupMembers);
var profCfg = (ConsensusProfileConfig) prof.config();
//profCfg.setDataGroup("raft");
profCfg.setDataPath(".data-" + instanceSettings.id);
profCfg.setPartitions(3);
atomixBuilder.addProfile(prof);
//atomixBuilder.addProfile(Profile.dataGrid(32));
}
var atomix = atomixBuilder.build();
TdSerializer.register(atomix.getSerializationService());
atomix.start().join();
var kafkaParameters = new KafkaParameters(clusterSettings, instanceSettings.id);
var api = new AtomixReactiveApi(nodeId, atomix, kafkaParameters, diskSessions, resultingEventTransformerSet);
var api = new AtomixReactiveApi(clientOnly, kafkaParameters, diskSessions, resultingEventTransformerSet);
LOG.info("Starting ReactiveApi...");
@@ -193,7 +116,7 @@ public class Entrypoint {
public static void main(String[] args) throws IOException {
var validArgs = parseArguments(args);
var atomixBuilder = Atomix.builder().withShutdownHookEnabled();
start(validArgs, atomixBuilder);
var api = start(validArgs);
api.waitForExit();
}
}
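After the migration, start() no longer needs an AtomixBuilder, so embedding it is a two-step call. A hedged sketch (the settings file names are hypothetical; parseArguments expects three paths, as above):

  var args = Entrypoint.parseArguments(new String[] {"cluster.yaml", "instance.yaml", "disk-sessions.json"});
  var api = Entrypoint.start(args);
  api.waitForExit();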

Event.java

@@ -2,30 +2,16 @@ package it.tdlight.reactiveapi;
import it.tdlight.common.utils.LibraryVersion;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import it.tdlight.reactiveapi.Event.OnRequest.InvalidRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import it.tdlight.reactiveapi.Event.ServerBoundEvent;
import java.io.DataInput;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.SerializationException;
import org.jetbrains.annotations.Nullable;
import java.util.Arrays;
/**
* Any event received from a session
*/
public sealed interface Event {
int SERIAL_VERSION = ArrayUtils.hashCode(LibraryVersion.VERSION.getBytes(StandardCharsets.US_ASCII));
/**
*
* @return temporary unique identifier of the session
*/
long liveId();
int SERIAL_VERSION = Arrays.hashCode(LibraryVersion.VERSION.getBytes(StandardCharsets.US_ASCII));
/**
* Event received after choosing the user id of the session
@@ -46,49 +32,49 @@
*/
sealed interface OnLoginCodeRequested extends ClientBoundEvent {}
record OnUserLoginCodeRequested(long liveId, long userId, long phoneNumber) implements OnLoginCodeRequested {}
record OnUserLoginCodeRequested(long userId, long phoneNumber) implements OnLoginCodeRequested {}
record OnBotLoginCodeRequested(long liveId, long userId, String token) implements OnLoginCodeRequested {}
record OnBotLoginCodeRequested(long userId, String token) implements OnLoginCodeRequested {}
record OnOtherDeviceLoginRequested(long liveId, long userId, String link) implements ClientBoundEvent {}
record OnOtherDeviceLoginRequested(long userId, String link) implements ClientBoundEvent {}
record OnPasswordRequested(long liveId, long userId, String passwordHint, boolean hasRecoveryEmail,
record OnPasswordRequested(long userId, String passwordHint, boolean hasRecoveryEmail,
String recoveryEmailPattern) implements ClientBoundEvent {}
record Ignored(long liveId, long userId) implements ClientBoundEvent {}
record Ignored(long userId) implements ClientBoundEvent {}
/**
* Event received from TDLib
*/
sealed interface OnUpdate extends ClientBoundEvent {}
record OnUpdateData(long liveId, long userId, TdApi.Update update) implements OnUpdate {}
record OnUpdateData(long userId, TdApi.Update update) implements OnUpdate {}
record OnUpdateError(long liveId, long userId, TdApi.Error error) implements OnUpdate {}
record OnUpdateError(long userId, TdApi.Error error) implements OnUpdate {}
sealed interface OnRequest<T extends TdApi.Object> extends ServerBoundEvent {
record Request<T extends TdApi.Object>(long liveId, TdApi.Function<T> request, Instant timeout)
implements OnRequest<T> {}
record Request<T extends TdApi.Object>(long clientId, long requestId, TdApi.Function<T> request,
Instant timeout) implements OnRequest<T> {}
record InvalidRequest<T extends TdApi.Object>(long liveId) implements OnRequest<T> {}
record InvalidRequest<T extends TdApi.Object>(long clientId, long requestId) implements OnRequest<T> {}
static <T extends TdApi.Object> Event.OnRequest<T> deserialize(DataInput dataInput) {
try {
var liveId = dataInput.readLong();
var dataVersion = dataInput.readInt();
if (dataVersion != SERIAL_VERSION) {
// Deprecated request
return new InvalidRequest<>(liveId);
}
long millis = dataInput.readLong();
var timeout = Instant.ofEpochMilli(millis);
@SuppressWarnings("unchecked")
TdApi.Function<T> request = (TdApi.Function<T>) TdApi.Deserializer.deserialize(dataInput);
return new Request<>(liveId, request, timeout);
} catch (UnsupportedOperationException | IOException e) {
throw new SerializationException(e);
}
}
long clientId();
long requestId();
}
sealed interface OnResponse<T extends TdApi.Object> extends ClientBoundEvent {
record Response<T extends TdApi.Object>(long clientId, long requestId, long userId,
T response) implements OnResponse<T> {}
record InvalidResponse<T extends TdApi.Object>(long clientId, long requestId, long userId) implements
OnResponse<T> {}
long clientId();
long requestId();
}
}

KafkaChannelName.java

@@ -0,0 +1,38 @@
package it.tdlight.reactiveapi;
import org.apache.kafka.common.serialization.Serializer;
public enum KafkaChannelName {
CLIENT_BOUND_EVENT("event", ClientBoundEventSerializer.class, ClientBoundEventDeserializer.class),
TDLIB_REQUEST("request", TdlibRequestSerializer.class, TdlibRequestDeserializer.class),
TDLIB_RESPONSE("response", TdlibResponseSerializer.class, TdlibResponseDeserializer.class);
private final String name;
private final Class<?> serializerClass;
private final Class<?> deserializerClass;
KafkaChannelName(String kafkaName,
Class<?> serializerClass,
Class<?> deserializerClass) {
this.name = kafkaName;
this.serializerClass = serializerClass;
this.deserializerClass = deserializerClass;
}
public String getKafkaName() {
return name;
}
public Class<?> getSerializerClass() {
return serializerClass;
}
public Class<?> getDeserializerClass() {
return deserializerClass;
}
@Override
public String toString() {
return name;
}
}
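Each channel name becomes the middle segment of the per-user topic built by UserTopic (changed later in this commit), following the tdlib.<channel>.<userId> convention. An illustrative listing, assuming this hypothetical example class lives in the same package:
package it.tdlight.reactiveapi;
public class TopicNamesExample {
  public static void main(String[] args) {
    for (KafkaChannelName channel : KafkaChannelName.values()) {
      // Prints: tdlib.event.123456, tdlib.request.123456, tdlib.response.123456
      System.out.println(new UserTopic(channel, 123456).getTopic());
    }
  }
}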

View File

@ -0,0 +1,16 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
public class KafkaClientBoundConsumer extends KafkaConsumer<ClientBoundEvent> {
public KafkaClientBoundConsumer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.CLIENT_BOUND_EVENT;
}
}

View File

@ -0,0 +1,16 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
public class KafkaClientBoundProducer extends KafkaProducer<ClientBoundEvent> {
public KafkaClientBoundProducer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.CLIENT_BOUND_EVENT;
}
}

View File

@ -1,8 +1,9 @@
package it.tdlight.reactiveapi;
import static java.lang.Math.toIntExact;
import it.tdlight.common.Init;
import it.tdlight.common.utils.CantLoadLibrary;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
@ -22,7 +23,7 @@ import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;
import reactor.util.retry.Retry;
public class KafkaConsumer {
public abstract class KafkaConsumer<K> {
private static final Logger LOG = LogManager.getLogger(KafkaConsumer.class);
@ -32,7 +33,7 @@ public class KafkaConsumer {
this.kafkaParameters = kafkaParameters;
}
public KafkaReceiver<Integer, ClientBoundEvent> createReceiver(@NotNull String groupId, @Nullable Long userId) {
public KafkaReceiver<Integer, K> createReceiver(@NotNull String groupId, @Nullable Long userId) {
try {
Init.start();
} catch (CantLoadLibrary e) {
@ -44,48 +45,47 @@ public class KafkaConsumer {
props.put(ConsumerConfig.CLIENT_ID_CONFIG, kafkaParameters.clientId() + (userId != null ? ("_" + userId) : ""));
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ClientBoundEventDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, getChannelName().getDeserializerClass());
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
ReceiverOptions<Integer, ClientBoundEvent> receiverOptions = ReceiverOptions
.<Integer, ClientBoundEvent>create(props)
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, toIntExact(Duration.ofMinutes(5).toMillis()));
ReceiverOptions<Integer, K> receiverOptions = ReceiverOptions
.<Integer, K>create(props)
.commitInterval(Duration.ofSeconds(10))
.commitBatchSize(64)
.maxCommitAttempts(100)
.maxDeferredCommits(100);
Pattern pattern;
if (userId == null) {
pattern = Pattern.compile("tdlib\\.event\\.[0-9]+");
pattern = Pattern.compile("tdlib\\." + getChannelName() + "\\.\\d+");
} else {
pattern = Pattern.compile("tdlib\\.event\\." + userId);
pattern = Pattern.compile("tdlib\\." + getChannelName() + "\\." + userId);
}
ReceiverOptions<Integer, ClientBoundEvent> options = receiverOptions
ReceiverOptions<Integer, K> options = receiverOptions
.subscription(pattern)
.addAssignListener(partitions -> LOG.debug("onPartitionsAssigned {}", partitions))
.addRevokeListener(partitions -> LOG.debug("onPartitionsRevoked {}", partitions));
return KafkaReceiver.create(options);
}
private Flux<TimestampedClientBoundEvent> retryIfCleanup(Flux<TimestampedClientBoundEvent> clientBoundEventFlux) {
return clientBoundEventFlux.retryWhen(Retry
public abstract KafkaChannelName getChannelName();
protected Flux<Timestamped<K>> retryIfCleanup(Flux<Timestamped<K>> eventFlux) {
return eventFlux.retryWhen(Retry
.backoff(Long.MAX_VALUE, Duration.ofMillis(100))
.maxBackoff(Duration.ofSeconds(5))
.filter(ex -> ex instanceof RebalanceInProgressException)
.doBeforeRetry(s -> LOG.warn("Rebalancing in progress")));
}
public Flux<TimestampedClientBoundEvent> consumeMessages(@NotNull String subGroupId, long userId, long liveId) {
return consumeMessagesInternal(subGroupId, userId).filter(e -> e.event().liveId() == liveId);
}
public Flux<TimestampedClientBoundEvent> consumeMessages(@NotNull String subGroupId, long userId) {
public Flux<Timestamped<K>> consumeMessages(@NotNull String subGroupId, long userId) {
return consumeMessagesInternal(subGroupId, userId);
}
public Flux<TimestampedClientBoundEvent> consumeMessages(@NotNull String subGroupId) {
public Flux<Timestamped<K>> consumeMessages(@NotNull String subGroupId) {
return consumeMessagesInternal(subGroupId, null);
}
private Flux<TimestampedClientBoundEvent> consumeMessagesInternal(@NotNull String subGroupId, @Nullable Long userId) {
private Flux<Timestamped<K>> consumeMessagesInternal(@NotNull String subGroupId, @Nullable Long userId) {
return createReceiver(kafkaParameters.groupId() + "-" + subGroupId, userId)
.receive()
.log("consume-messages" + (userId != null ? "-" + userId : ""),
@ -98,9 +98,9 @@ public class KafkaConsumer {
.doOnNext(result -> result.receiverOffset().acknowledge())
.map(record -> {
if (record.timestampType() == TimestampType.CREATE_TIME) {
return new TimestampedClientBoundEvent(record.timestamp(), record.value());
return new Timestamped<>(record.timestamp(), record.value());
} else {
return new TimestampedClientBoundEvent(1, record.value());
return new Timestamped<>(1, record.value());
}
})
.transform(this::retryIfCleanup);
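A hedged usage sketch of the generic consumer: the sub-group id and user id below are placeholders, and kafkaParameters is assumed to come from configuration.
// Hypothetical fragment: stream one user's client-bound events,
// dropping the Timestamped envelope added by consumeMessagesInternal.
static reactor.core.Disposable consumeUserEvents(KafkaParameters kafkaParameters) {
  return new KafkaClientBoundConsumer(kafkaParameters)
      .consumeMessages("example-sub-group", 123456L)
      .map(Timestamped::data)
      .subscribe(event -> System.out.println("event for user " + event.userId()));
}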

View File

@ -1,6 +1,5 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
@ -17,12 +16,11 @@ import reactor.kafka.sender.KafkaSender;
import reactor.kafka.sender.SenderOptions;
import reactor.kafka.sender.SenderRecord;
public class KafkaProducer {
public abstract class KafkaProducer<K> {
private static final Logger LOG = LogManager.getLogger(KafkaProducer.class);
private final KafkaSender<Integer, ClientBoundEvent> sender;
private final KafkaSender<Integer, K> sender;
public KafkaProducer(KafkaParameters kafkaParameters) {
Map<String, Object> props = new HashMap<>();
@ -30,19 +28,22 @@ public class KafkaProducer {
props.put(ProducerConfig.CLIENT_ID_CONFIG, kafkaParameters.clientId());
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ClientBoundEventSerializer.class);
SenderOptions<Integer, ClientBoundEvent> senderOptions = SenderOptions.create(props);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, getChannelName().getSerializerClass());
SenderOptions<Integer, K> senderOptions = SenderOptions.create(props);
sender = KafkaSender.create(senderOptions.maxInFlight(1024));
}
public Mono<Void> sendMessages(UserTopic userId, Flux<ClientBoundEvent> eventsFlux) {
public abstract KafkaChannelName getChannelName();
public Mono<Void> sendMessages(long userId, Flux<K> eventsFlux) {
var userTopic = new UserTopic(getChannelName(), userId);
return eventsFlux
.<SenderRecord<Integer, ClientBoundEvent, Integer>>map(event -> SenderRecord.create(new ProducerRecord<>(
userId.getTopic(),
.<SenderRecord<Integer, K, Integer>>map(event -> SenderRecord.create(new ProducerRecord<>(
userTopic.getTopic(),
event
), null))
.log("produce-messages-" + userId,
.log("produce-messages-" + userTopic,
Level.FINEST,
SignalType.REQUEST,
SignalType.ON_NEXT,

View File

@ -0,0 +1,15 @@
package it.tdlight.reactiveapi;
import java.io.Closeable;
public record KafkaTdlibClient(KafkaTdlibRequestProducer request,
KafkaTdlibResponseConsumer response,
KafkaClientBoundConsumer events) implements Closeable {
@Override
public void close() {
request.close();
}
}
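A hypothetical wiring sketch for the client-side bundle, built from a single KafkaParameters instance; close() disposes only the request producer, presumably because the consumers hold no KafkaSender that needs closing.
// Hypothetical fragment, not part of this commit.
static KafkaTdlibClient createClient(KafkaParameters params) {
  return new KafkaTdlibClient(
      new KafkaTdlibRequestProducer(params),  // requests out
      new KafkaTdlibResponseConsumer(params), // responses in
      new KafkaClientBoundConsumer(params));  // TDLib events in
}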

View File

@ -0,0 +1,19 @@
package it.tdlight.reactiveapi;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnRequest;
public class KafkaTdlibRequestConsumer extends KafkaConsumer<OnRequest<TdApi.Object>> {
public KafkaTdlibRequestConsumer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.TDLIB_REQUEST;
}
}

View File

@ -0,0 +1,15 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.OnRequest;
public class KafkaTdlibRequestProducer extends KafkaProducer<OnRequest<?>> {
public KafkaTdlibRequestProducer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.TDLIB_REQUEST;
}
}

View File

@ -0,0 +1,18 @@
package it.tdlight.reactiveapi;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnResponse;
public class KafkaTdlibResponseConsumer extends KafkaConsumer<OnResponse<TdApi.Object>> {
public KafkaTdlibResponseConsumer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.TDLIB_RESPONSE;
}
}

View File

@ -0,0 +1,17 @@
package it.tdlight.reactiveapi;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnResponse;
public class KafkaTdlibResponseProducer extends KafkaProducer<OnResponse<TdApi.Object>> {
public KafkaTdlibResponseProducer(KafkaParameters kafkaParameters) {
super(kafkaParameters);
}
@Override
public KafkaChannelName getChannelName() {
return KafkaChannelName.TDLIB_RESPONSE;
}
}

View File

@ -0,0 +1,16 @@
package it.tdlight.reactiveapi;
import java.io.Closeable;
public record KafkaTdlibServer(KafkaTdlibRequestConsumer request,
KafkaTdlibResponseProducer response,
KafkaClientBoundProducer events) implements Closeable {
@Override
public void close() {
response.close();
events.close();
}
}

View File

@ -1,6 +1,5 @@
package it.tdlight.reactiveapi;
import io.atomix.core.Atomix;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@ -8,19 +7,12 @@ import reactor.core.publisher.Mono;
public class LiveAtomixReactiveApiClient extends BaseAtomixReactiveApiClient {
private final Flux<ClientBoundEvent> clientBoundEvents;
private final long liveId;
LiveAtomixReactiveApiClient(Atomix atomix,
KafkaConsumer kafkaConsumer,
long liveId,
long userId,
String subGroupId) {
super(atomix, userId);
this.clientBoundEvents = kafkaConsumer
.consumeMessages(subGroupId, userId, liveId)
.map(TimestampedClientBoundEvent::event);
this.liveId = liveId;
super.initialize();
LiveAtomixReactiveApiClient(KafkaTdlibClient kafkaTdlibClient, long userId, String subGroupId) {
super(kafkaTdlibClient, userId);
this.clientBoundEvents = kafkaTdlibClient.events()
.consumeMessages(subGroupId, userId)
.map(Timestamped::data);
}
@Override
@ -28,8 +20,4 @@ public class LiveAtomixReactiveApiClient extends BaseAtomixReactiveApiClient {
return clientBoundEvents;
}
@Override
protected Flux<Long> liveIdChange() {
return Flux.just(liveId);
}
}

View File

@ -1,18 +0,0 @@
package it.tdlight.reactiveapi;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonProperty.Access;
import org.jetbrains.annotations.NotNull;
public class NodeSettings {
public @NotNull String id;
public @NotNull String address;
public NodeSettings(@JsonProperty(required = true, value = "id") @NotNull String id,
@JsonProperty(required = true, value = "address") @NotNull String address) {
this.id = id;
this.address = address;
}
}

View File

@ -1,199 +0,0 @@
package it.tdlight.reactiveapi;
import static java.util.Objects.requireNonNull;
import static java.util.Objects.requireNonNullElse;
import it.tdlight.jni.TdApi.AuthorizationStateClosed;
import it.tdlight.jni.TdApi.AuthorizationStateClosing;
import it.tdlight.jni.TdApi.AuthorizationStateLoggingOut;
import it.tdlight.jni.TdApi.AuthorizationStateReady;
import it.tdlight.jni.TdApi.Close;
import it.tdlight.jni.TdApi.UpdateAuthorizationState;
import it.tdlight.jni.TdApi.UpdateNewMessage;
import it.tdlight.reactiveapi.Event.OnUpdateData;
import java.time.Duration;
import java.time.Instant;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentHashMap.KeySetView;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
import reactor.util.retry.Retry;
public class PeriodicRestarter {
private static final Logger LOG = LoggerFactory.getLogger(PeriodicRestarter.class);
private static Duration JITTER_MAX_DELAY = Duration.ofMinutes(5);
private final ReactiveApi api;
private final Duration interval;
private final Set<Long> restartUserIds;
private final ReactiveApiMultiClient multiClient;
/**
* Live id -> x
*/
private final ConcurrentMap<Long, Disposable> closeManagedByPeriodicRestarter = new ConcurrentHashMap<>();
/**
* Live id -> x
*/
private final ConcurrentMap<Long, Boolean> closingByPeriodicRestarter = new ConcurrentHashMap<>();
/**
* Live id -> x
*/
private final ConcurrentMap<Long, Boolean> sessionAuthReady = new ConcurrentHashMap<>();
/**
* Useful to register sessions at startup
* User id -> x
*/
private final KeySetView<Long, Object> seenUsers = new ConcurrentHashMap<Long, Object>().keySet(new Object());
public PeriodicRestarter(ReactiveApi api, Duration interval, Set<Long> restartUserIds) {
this.api = api;
this.interval = interval;
this.restartUserIds = restartUserIds;
this.multiClient = api.multiClient("periodic-restarter");
}
public Mono<Void> start() {
return Mono.fromRunnable(() -> {
LOG.info("Starting periodic restarter...");
multiClient
.clientBoundEvents(true)
// Filter events to reduce overhead
.filter(event -> {
boolean isAllowedUpdate;
if (event instanceof OnUpdateData onUpdateData) {
isAllowedUpdate = switch (onUpdateData.update().getConstructor()) {
case UpdateAuthorizationState.CONSTRUCTOR, UpdateNewMessage.CONSTRUCTOR -> true;
default -> false;
};
} else {
isAllowedUpdate = false;
}
return isAllowedUpdate && restartUserIds.contains(event.userId());
})
.cast(OnUpdateData.class)
.doOnNext(event -> {
if (event.update() instanceof UpdateAuthorizationState updateAuthorizationState) {
if (updateAuthorizationState.authorizationState instanceof AuthorizationStateReady) {
// Session is now ready
this.sessionAuthReady.put(event.liveId(), true);
onSessionReady(event.liveId(), event.userId());
} else if (updateAuthorizationState.authorizationState instanceof AuthorizationStateLoggingOut) {
// Session is not ready anymore
this.sessionAuthReady.remove(event.liveId(), false);
} else if (updateAuthorizationState.authorizationState instanceof AuthorizationStateClosing) {
// Session is not ready anymore
this.sessionAuthReady.remove(event.liveId(), false);
} else if (updateAuthorizationState.authorizationState instanceof AuthorizationStateClosed) {
// Session is not ready anymore
this.sessionAuthReady.remove(event.liveId(), false);
Boolean prev = closingByPeriodicRestarter.remove(event.liveId());
var disposable = closeManagedByPeriodicRestarter.remove(event.userId());
boolean managed = prev != null && prev;
// Check if the live session is managed by the periodic restarter
if (managed) {
LOG.info("The session #IDU{} (liveId: {}) is being started", event.userId(), event.liveId());
// Restart the session
api.tryReviveSession(event.userId()).subscribeOn(Schedulers.parallel()).subscribe();
}
// Dispose restarter anyway
if (disposable != null && !disposable.isDisposed()) {
disposable.dispose();
}
}
} else if (event.update() instanceof UpdateNewMessage) {
if (seenUsers.add(event.userId())) {
LOG.info("Found a running bot that wasn't registered: {} (liveId: {})", event.userId(), event.liveId());
var wasReady = requireNonNullElse(this.sessionAuthReady.put(event.liveId(), true), false);
if (!wasReady) {
onSessionReady(event.liveId(), event.userId());
}
}
}
})
.subscribeOn(Schedulers.parallel())
.subscribe();
LOG.info("Started periodic restarter");
});
}
private void onSessionReady(long liveId, long userId) {
Duration maxRandomDelay;
if (JITTER_MAX_DELAY.compareTo(interval) < 0) {
maxRandomDelay = JITTER_MAX_DELAY;
} else {
maxRandomDelay = interval;
}
var randomDelay = randomTime(maxRandomDelay);
var totalDelay = interval.plus(randomDelay);
LOG.info("The session #IDU{} (liveId: {}) will be restarted at {}",
userId,
liveId,
Instant.now().plus(totalDelay)
);
// Restart after x time
AtomicReference<Disposable> disposableRef = new AtomicReference<>();
var disposable = Schedulers
.parallel()
.schedule(() -> {
closeManagedByPeriodicRestarter.remove(liveId, disposableRef.get());
LOG.info("The session #IDU{} (liveId: {}) is being stopped", userId, liveId);
if (!requireNonNullElse(closingByPeriodicRestarter.put(liveId, true), false)) {
// Request restart
multiClient
.request(userId, liveId, new Close(), Instant.now().plus(Duration.ofMinutes(5)))
.subscribeOn(Schedulers.parallel())
.retryWhen(Retry.backoff(5, Duration.ofSeconds(1)).maxBackoff(Duration.ofSeconds(5)))
.doOnError(ex -> {
if (ex instanceof TdError tdError && tdError.getTdCode() == 404) {
LOG.warn("Failed to restart bot {} (liveId={}): not found", userId, liveId);
} else {
LOG.error("Failed to restart bot {} (liveId={})", userId, liveId, ex);
}
})
.onErrorResume(ex -> Mono.empty())
.subscribe();
}
}, totalDelay.toMillis(), TimeUnit.MILLISECONDS);
disposableRef.set(disposable);
var prev = closeManagedByPeriodicRestarter.put(liveId, disposable);
if (prev != null) prev.dispose();
}
/**
* @return random duration from 0 to n
*/
private Duration randomTime(Duration max) {
return Duration.ofMillis(ThreadLocalRandom.current().nextInt(0, Math.toIntExact(max.toMillis())));
}
public Mono<Void> stop() {
return Mono.fromRunnable(() -> {
LOG.info("Stopping periodic restarter...");
multiClient.close();
LOG.info("Stopped periodic restarter");
});
}
}

View File

@ -9,29 +9,11 @@ public interface ReactiveApi {
Mono<Void> start();
/**
* Send a request to the cluster to load that user's session from disk
*/
Mono<Void> tryReviveSession(long userId);
Mono<CreateSessionResponse> createSession(CreateSessionRequest req);
Mono<Map<Long, String>> getAllUsers();
Set<UserIdAndLiveId> getLocalLiveSessionIds();
boolean is(String nodeId);
/**
* May return empty
*/
Mono<Long> resolveUserLiveId(long userId);
ReactiveApiMultiClient multiClient(String subGroupId);
ReactiveApiClient dynamicClient(String subGroupId, long userId);
ReactiveApiClient liveClient(String subGroupId, long liveId, long userId);
ReactiveApiClient client(String subGroupId, long userId);
Mono<Void> close();
void waitForExit();
}
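With liveId resolution removed, attaching to a session is a single call addressed purely by user id. A sketch with a placeholder sub-group id:
// Hypothetical fragment, not part of this commit.
static ReactiveApiClient attachToSession(ReactiveApi api, long userId) {
  return api.client("example-sub-group", userId);
}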

View File

@ -7,9 +7,6 @@ import static java.util.Objects.requireNonNull;
import static java.util.concurrent.CompletableFuture.completedFuture;
import com.google.common.primitives.Longs;
import io.atomix.cluster.messaging.ClusterEventService;
import io.atomix.cluster.messaging.Subscription;
import io.atomix.core.Atomix;
import it.tdlight.common.Init;
import it.tdlight.common.ReactiveTelegramClient;
import it.tdlight.common.Response;
@ -32,8 +29,8 @@ import it.tdlight.reactiveapi.Event.OnBotLoginCodeRequested;
import it.tdlight.reactiveapi.Event.OnOtherDeviceLoginRequested;
import it.tdlight.reactiveapi.Event.OnPasswordRequested;
import it.tdlight.reactiveapi.Event.OnRequest;
import it.tdlight.reactiveapi.Event.OnRequest.InvalidRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import it.tdlight.reactiveapi.Event.OnResponse;
import it.tdlight.reactiveapi.Event.OnUpdateData;
import it.tdlight.reactiveapi.Event.OnUpdateError;
import it.tdlight.reactiveapi.Event.OnUserLoginCodeRequested;
@ -57,7 +54,7 @@ import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.SerializationException;
import org.apache.kafka.common.errors.SerializationException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
@ -65,47 +62,47 @@ import org.slf4j.LoggerFactory;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Sinks;
import reactor.core.publisher.Sinks.EmitFailureHandler;
import reactor.core.publisher.Sinks.Many;
import reactor.core.scheduler.Schedulers;
import reactor.util.concurrent.Queues;
public abstract class ReactiveApiPublisher {
private static final Logger LOG = LoggerFactory.getLogger(ReactiveApiPublisher.class);
private static final Duration SPECIAL_RAW_TIMEOUT_DURATION = Duration.ofMinutes(5);
private final KafkaProducer kafkaProducer;
private final ClusterEventService eventService;
private static final Duration TEN_MS = Duration.ofMillis(10);
private final KafkaTdlibServer kafkaTdlibServer;
private final Set<ResultingEventTransformer> resultingEventTransformerSet;
private final ReactiveTelegramClient rawTelegramClient;
private final Flux<Signal> telegramClient;
private final AtomicReference<State> state = new AtomicReference<>(new State(LOGGED_OUT));
protected final long userId;
protected final UserTopic userTopic;
protected final long liveId;
private final String dynamicIdResolveSubject;
private final Many<OnResponse<TdApi.Object>> responses
= Sinks.many().unicast().onBackpressureBuffer(Queues.<OnResponse<TdApi.Object>>small().get());
private final AtomicReference<Disposable> disposable = new AtomicReference<>();
private final AtomicReference<Path> path = new AtomicReference<>();
private ReactiveApiPublisher(Atomix atomix,
KafkaProducer kafkaProducer,
private ReactiveApiPublisher(KafkaTdlibServer kafkaTdlibServer,
Set<ResultingEventTransformer> resultingEventTransformerSet,
long liveId,
long userId) {
this.kafkaProducer = kafkaProducer;
this.eventService = atomix.getEventService();
this.kafkaTdlibServer = kafkaTdlibServer;
this.resultingEventTransformerSet = resultingEventTransformerSet;
this.userId = userId;
this.userTopic = new UserTopic(userId);
this.liveId = liveId;
this.dynamicIdResolveSubject = SubjectNaming.getDynamicIdResolveSubject(userId);
this.rawTelegramClient = ClientManager.createReactive();
try {
Init.start();
} catch (CantLoadLibrary e) {
throw new RuntimeException("Can't load TDLight", e);
}
this.telegramClient = Flux.<Signal>create(sink -> this.registerTopics().thenAccept(subscription -> {
this.telegramClient = Flux.<Signal>create(sink -> {
var subscription = this.registerTopics();
try {
rawTelegramClient.createAndRegisterClient();
} catch (Throwable ex) {
@ -115,31 +112,25 @@ public abstract class ReactiveApiPublisher {
rawTelegramClient.setListener(sink::next);
sink.onCancel(rawTelegramClient::cancel);
sink.onDispose(() -> {
subscription.close();
subscription.dispose();
rawTelegramClient.dispose();
});
}));
});
}
public static ReactiveApiPublisher fromToken(Atomix atomix,
KafkaProducer kafkaProducer,
public static ReactiveApiPublisher fromToken(KafkaTdlibServer kafkaTdlibServer,
Set<ResultingEventTransformer> resultingEventTransformerSet,
Long liveId,
long userId,
String token) {
return new ReactiveApiPublisherToken(atomix, kafkaProducer, resultingEventTransformerSet, liveId, userId, token);
return new ReactiveApiPublisherToken(kafkaTdlibServer, resultingEventTransformerSet, userId, token);
}
public static ReactiveApiPublisher fromPhoneNumber(Atomix atomix,
KafkaProducer kafkaProducer,
public static ReactiveApiPublisher fromPhoneNumber(KafkaTdlibServer kafkaTdlibServer,
Set<ResultingEventTransformer> resultingEventTransformerSet,
Long liveId,
long userId,
long phoneNumber) {
return new ReactiveApiPublisherPhoneNumber(atomix,
kafkaProducer,
return new ReactiveApiPublisherPhoneNumber(kafkaTdlibServer,
resultingEventTransformerSet,
liveId,
userId,
phoneNumber
);
@ -168,33 +159,38 @@ public abstract class ReactiveApiPublisher {
publishedResultingEvents
// Obtain only TDLib-bound events
.filter(s -> s instanceof TDLibBoundResultingEvent<?>)
.map(s -> ((TDLibBoundResultingEvent<?>) s).action())
.map(s -> ((TDLibBoundResultingEvent<?>) s))
// Buffer requests to avoid halting the event loop
.onBackpressureBuffer()
// Send requests to tdlib
.flatMap(function -> Mono
.from(rawTelegramClient.send(function, SPECIAL_RAW_TIMEOUT_DURATION))
.flatMap(result -> fixBrokenKey(function, result))
.flatMap(req -> Mono
.from(rawTelegramClient.send(req.action(), SPECIAL_RAW_TIMEOUT_DURATION))
.flatMap(result -> fixBrokenKey(req.action(), result))
.mapNotNull(resp -> {
if (resp.getConstructor() == TdApi.Error.CONSTRUCTOR) {
LOG.error("Received error for special request {}: {}\nThe instance will be closed", function, resp);
return new OnUpdateError(liveId, userId, (TdApi.Error) resp);
if (req.ignoreFailure()) {
LOG.debug("Received error for special request {}", req.action());
return null;
} else {
LOG.error("Received error for special request {}: {}\nThe instance will be closed", req.action(), resp);
return new OnUpdateError(userId, (TdApi.Error) resp);
}
} else {
return null;
}
})
.doOnError(ex -> LOG.error("Failed to receive the response for special request {}\n"
+ " The instance will be closed", function, ex))
.onErrorResume(ex -> Mono.just(new OnUpdateError(liveId, userId, new TdApi.Error(500, ex.getMessage()))))
+ " The instance will be closed", req.action(), ex))
.onErrorResume(ex -> Mono.just(new OnUpdateError(userId, new TdApi.Error(500, ex.getMessage()))))
, Integer.MAX_VALUE)
// Buffer requests to avoid halting the event loop
.onBackpressureBuffer()
.doOnError(ex -> LOG.error("Failed to receive resulting events. The instance will be closed", ex))
.onErrorResume(ex -> Mono.just(new OnUpdateError(liveId, userId, new TdApi.Error(500, ex.getMessage()))))
.onErrorResume(ex -> Mono.just(new OnUpdateError(userId, new TdApi.Error(500, ex.getMessage()))))
// when an error arrives, close the session
.take(1, true)
@ -214,7 +210,7 @@ public abstract class ReactiveApiPublisher {
// Buffer requests to avoid halting the event loop
.onBackpressureBuffer();
kafkaProducer.sendMessages(userTopic, messagesToSend).subscribeOn(Schedulers.parallel()).subscribe();
kafkaTdlibServer.events().sendMessages(userId, messagesToSend).subscribeOn(Schedulers.parallel()).subscribe();
publishedResultingEvents
// Obtain only cluster-bound events
@ -272,7 +268,7 @@ public abstract class ReactiveApiPublisher {
private ResultingEvent wrapUpdateSignal(Signal signal) {
var update = (TdApi.Update) signal.getUpdate();
return new ClientBoundResultingEvent(new OnUpdateData(liveId, userId, update));
return new ClientBoundResultingEvent(new OnUpdateData(userId, update));
}
private List<ResultingEvent> withUpdateSignal(Signal signal, List<ResultingEvent> list) {
@ -306,8 +302,7 @@ public abstract class ReactiveApiPublisher {
if (signal.isClosed()) {
signal.getClosed();
LOG.info("Received a closed signal");
return List.of(new ClientBoundResultingEvent(new OnUpdateData(liveId,
userId,
return List.of(new ClientBoundResultingEvent(new OnUpdateData(userId,
new TdApi.UpdateAuthorizationState(new AuthorizationStateClosed())
)), new ResultingEventPublisherClosed());
}
@ -360,13 +355,12 @@ public abstract class ReactiveApiPublisher {
case TdApi.AuthorizationStateWaitOtherDeviceConfirmation.CONSTRUCTOR -> {
var link = ((AuthorizationStateWaitOtherDeviceConfirmation) updateAuthorizationState.authorizationState).link;
return List.of(updateResult,
new ClientBoundResultingEvent(new OnOtherDeviceLoginRequested(liveId, userId, link)));
new ClientBoundResultingEvent(new OnOtherDeviceLoginRequested(userId, link)));
}
case TdApi.AuthorizationStateWaitPassword.CONSTRUCTOR -> {
var authorizationStateWaitPassword = ((AuthorizationStateWaitPassword) updateAuthorizationState.authorizationState);
return List.of(updateResult,
new ClientBoundResultingEvent(new OnPasswordRequested(liveId,
userId,
new ClientBoundResultingEvent(new OnPasswordRequested(userId,
authorizationStateWaitPassword.passwordHint,
authorizationStateWaitPassword.hasRecoveryEmailAddress,
authorizationStateWaitPassword.recoveryEmailAddressPattern
@ -440,7 +434,6 @@ public abstract class ReactiveApiPublisher {
private static void writeClientBoundEvent(ClientBoundEvent clientBoundEvent, DataOutputStream dataOutputStream)
throws IOException {
dataOutputStream.writeLong(clientBoundEvent.liveId());
dataOutputStream.writeLong(clientBoundEvent.userId());
dataOutputStream.writeInt(SERIAL_VERSION);
if (clientBoundEvent instanceof OnUpdateData onUpdateData) {
@ -468,19 +461,25 @@ public abstract class ReactiveApiPublisher {
}
}
private CompletableFuture<Subscription> registerTopics() {
// Start receiving requests
eventService.subscribe("session-" + liveId + "-requests",
ReactiveApiPublisher::deserializeRequest,
this::handleRequest,
ReactiveApiPublisher::serializeResponse);
// Start receiving request
return eventService.subscribe(dynamicIdResolveSubject,
b -> null,
r -> completedFuture(liveId),
Longs::toByteArray
);
@SuppressWarnings("unchecked")
private Disposable registerTopics() {
var subscription1 = kafkaTdlibServer.request().consumeMessages("td-requests-handler", userId)
.flatMapSequential(req -> this
.handleRequest(req.data())
.doOnNext(response -> this.responses.emitNext(response, EmitFailureHandler.busyLooping(TEN_MS)))
.then()
)
.subscribeOn(Schedulers.parallel())
.subscribe();
var subscription2 = this.kafkaTdlibServer
.response()
.sendMessages(userId, responses.asFlux())
.subscribeOn(Schedulers.parallel())
.subscribe();
return () -> {
subscription1.dispose();
subscription2.dispose();
};
}
private static byte[] serializeResponse(Response response) {
@ -499,47 +498,42 @@ public abstract class ReactiveApiPublisher {
}
}
private CompletableFuture<Response> handleRequest(OnRequest<Object> onRequestObj) {
private Mono<Event.OnResponse.Response<TdApi.Object>> handleRequest(OnRequest<TdApi.Object> onRequestObj) {
if (onRequestObj instanceof OnRequest.InvalidRequest invalidRequest) {
return completedFuture(new Response(invalidRequest.liveId(), new TdApi.Error(400, "Conflicting protocol version")));
return Mono.just(new Event.OnResponse.Response<>(invalidRequest.clientId(),
invalidRequest.requestId(),
userId,
new TdApi.Error(400, "Conflicting protocol version")
));
}
var requestObj = (Request<Object>) onRequestObj;
if (liveId != requestObj.liveId()) {
LOG.error("Received a request for another session!");
return completedFuture(new Response(liveId,
new TdApi.Error(400, "The request live id is different than the current live id")
));
} else {
var requestWithTimeoutInstant = new RequestWithTimeoutInstant<>(requestObj.request(), requestObj.timeout());
var state = this.state.get();
if (state.authPhase() == LOGGED_IN) {
var request = requestWithTimeoutInstant.request();
var timeoutDuration = Duration.between(Instant.now(), requestWithTimeoutInstant.timeout());
if (timeoutDuration.isZero() || timeoutDuration.isNegative()) {
LOG.error("Received an expired request. Expiration: {}", requestWithTimeoutInstant.timeout());
}
return Mono
.from(rawTelegramClient.send(request, timeoutDuration))
.map(responseObj -> new Response(liveId, responseObj))
.publishOn(Schedulers.boundedElastic())
.toFuture();
} else {
LOG.error("Ignored a request because the current state is {}. Request: {}", state, requestObj);
return completedFuture(new Response(liveId, new TdApi.Error(503, "Service Unavailable: " + state)));
var requestWithTimeoutInstant = new RequestWithTimeoutInstant<>(requestObj.request(), requestObj.timeout());
var state = this.state.get();
if (state.authPhase() == LOGGED_IN) {
var request = requestWithTimeoutInstant.request();
var timeoutDuration = Duration.between(Instant.now(), requestWithTimeoutInstant.timeout());
if (timeoutDuration.isZero() || timeoutDuration.isNegative()) {
LOG.warn("Received an expired request. Expiration: {}", requestWithTimeoutInstant.timeout());
}
}
}
private static <T extends TdApi.Object> OnRequest<T> deserializeRequest(byte[] bytes) {
return OnRequest.deserialize(new DataInputStream(new ByteArrayInputStream(bytes)));
return Mono
.from(rawTelegramClient.send(request, timeoutDuration))
.map(responseObj -> new Event.OnResponse.Response<>(onRequestObj.clientId(),
onRequestObj.requestId(),
userId, responseObj))
.publishOn(Schedulers.parallel());
} else {
LOG.error("Ignored a request because the current state is {}. Request: {}", state, requestObj);
return Mono.just(new Event.OnResponse.Response<>(onRequestObj.clientId(),
onRequestObj.requestId(),
userId, new TdApi.Error(503, "Service Unavailable: " + state)));
}
}
@Override
public String toString() {
return new StringJoiner(", ", ReactiveApiPublisher.class.getSimpleName() + "[", "]")
.add("userId=" + userId)
.add("liveId=" + liveId)
.toString();
}
@ -549,13 +543,11 @@ public abstract class ReactiveApiPublisher {
private final String botToken;
public ReactiveApiPublisherToken(Atomix atomix,
KafkaProducer kafkaProducer,
public ReactiveApiPublisherToken(KafkaTdlibServer kafkaTdlibServer,
Set<ResultingEventTransformer> resultingEventTransformerSet,
Long liveId,
long userId,
String botToken) {
super(atomix, kafkaProducer, resultingEventTransformerSet, liveId, userId);
super(kafkaTdlibServer, resultingEventTransformerSet, userId);
this.botToken = botToken;
}
@ -573,7 +565,6 @@ public abstract class ReactiveApiPublisher {
public String toString() {
return new StringJoiner(", ", ReactiveApiPublisherToken.class.getSimpleName() + "[", "]")
.add("userId=" + userId)
.add("liveId=" + liveId)
.add("token='" + botToken + "'")
.toString();
}
@ -583,13 +574,11 @@ public abstract class ReactiveApiPublisher {
private final long phoneNumber;
public ReactiveApiPublisherPhoneNumber(Atomix atomix,
KafkaProducer kafkaProducer,
public ReactiveApiPublisherPhoneNumber(KafkaTdlibServer kafkaTdlibServer,
Set<ResultingEventTransformer> resultingEventTransformerSet,
Long liveId,
long userId,
long phoneNumber) {
super(atomix, kafkaProducer, resultingEventTransformerSet, liveId, userId);
super(kafkaTdlibServer, resultingEventTransformerSet, userId);
this.phoneNumber = phoneNumber;
}
@ -611,14 +600,13 @@ public abstract class ReactiveApiPublisher {
@Override
public List<ResultingEvent> onWaitCode() {
return List.of(new ClientBoundResultingEvent(new OnUserLoginCodeRequested(liveId, userId, phoneNumber)));
return List.of(new ClientBoundResultingEvent(new OnUserLoginCodeRequested(userId, phoneNumber)));
}
@Override
public String toString() {
return new StringJoiner(", ", ReactiveApiPublisherPhoneNumber.class.getSimpleName() + "[", "]")
.add("userId=" + userId)
.add("liveId=" + liveId)
.add("phoneNumber=" + phoneNumber)
.toString();
}

View File

@ -11,7 +11,13 @@ public sealed interface ResultingEvent permits ClientBoundResultingEvent, TDLibB
record ClientBoundResultingEvent(ClientBoundEvent event) implements ResultingEvent {}
record TDLibBoundResultingEvent<T extends TdApi.Object>(TdApi.Function<T> action) implements ResultingEvent {}
record TDLibBoundResultingEvent<T extends TdApi.Object>(TdApi.Function<T> action, boolean ignoreFailure) implements
ResultingEvent {
public TDLibBoundResultingEvent(TdApi.Function<T> action) {
this(action, false);
}
}
sealed interface ClusterBoundResultingEvent extends ResultingEvent permits ResultingEventPublisherClosed {}
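For illustration, a transformer can now mix strict and best-effort actions; this fragment assumes java.util.List is imported and uses GetMe as an arbitrary example request:
List<ResultingEvent> events = List.of(
    // strict (ignoreFailure defaults to false): an Error response closes the instance
    new TDLibBoundResultingEvent<>(new TdApi.GetMe()),
    // best-effort: an Error response is logged at DEBUG and dropped
    new TDLibBoundResultingEvent<>(new TdApi.LoadChats(new TdApi.ChatListMain(), 500), true));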

View File

@ -1,8 +0,0 @@
package it.tdlight.reactiveapi;
public class SubjectNaming {
public static String getDynamicIdResolveSubject(long userId) {
return "session-" + userId + "-dynamic-live-id-resolve";
}
}

View File

@ -1,63 +0,0 @@
package it.tdlight.reactiveapi;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.KryoDataInput;
import com.esotericsoftware.kryo.io.KryoDataOutput;
import com.esotericsoftware.kryo.io.Output;
import io.atomix.primitive.serialization.SerializationService;
import it.tdlight.common.ConstructorDetector;
import it.tdlight.jni.TdApi;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.lang.reflect.Modifier;
import java.util.stream.Stream;
import org.apache.commons.lang3.SerializationException;
public class TdSerializer extends Serializer<TdApi.Object> {
private TdSerializer() {
}
public static void register(SerializationService serializationService) {
var serializerBuilder = serializationService.newBuilder("TdApi");
var tdApiClasses = TdApi.class.getDeclaredClasses();
// Add types
Class<?>[] classes = Stream
.of(tdApiClasses)
.filter(clazz -> clazz.isAssignableFrom(TdApi.Object.class))
.toArray(Class<?>[]::new);
serializerBuilder.addSerializer(new TdSerializer(), classes);
}
@Override
public void write(Kryo kryo, Output output, TdApi.Object object) {
try {
object.serialize(new KryoDataOutput(output));
} catch (IOException e) {
throw new SerializationException(e);
}
}
@Override
public TdApi.Object read(Kryo kryo, Input input, Class<TdApi.Object> type) {
try {
return TdApi.Deserializer.deserialize(new KryoDataInput(input));
} catch (IOException e) {
throw new SerializationException(e);
}
}
public static TdApi.Object deserializeBytes(byte[] bytes) {
var din = new DataInputStream(new ByteArrayInputStream(bytes));
try {
return TdApi.Deserializer.deserialize(din);
} catch (IOException e) {
throw new SerializationException(e);
}
}
}

View File

@ -0,0 +1,31 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import it.tdlight.jni.TdApi.Object;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
public class TdlibDeserializer implements Deserializer<Object> {
@Override
public Object deserialize(String topic, byte[] data) {
if (data == null || data.length == 0) {
return null;
}
var bais = new ByteArrayInputStream(data);
var dais = new DataInputStream(bais);
try {
if (dais.readInt() != SERIAL_VERSION) {
return new TdApi.Error(400, "Conflicting protocol version");
}
return TdApi.Deserializer.deserialize(dais);
} catch (IOException e) {
throw new SerializationException("Failed to deserialize TDLib object", e);
}
}
}

View File

@ -0,0 +1,51 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnRequest;
import it.tdlight.reactiveapi.Event.OnRequest.InvalidRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.time.Instant;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
public class TdlibRequestDeserializer<T extends TdApi.Object> implements Deserializer<OnRequest<T>> {
@Override
public OnRequest<T> deserialize(String topic, byte[] data) {
if (data == null || data.length == 0) {
return null;
}
try {
var bais = new ByteArrayInputStream(data);
var dais = new DataInputStream(bais);
var clientId = dais.readLong();
var requestId = dais.readLong();
if (dais.readInt() != SERIAL_VERSION) {
// Deprecated request
return new InvalidRequest<>(clientId, requestId);
} else {
long millis = dais.readLong();
if (millis == -2) {
// Marker written by TdlibRequestSerializer when an InvalidRequest is serialized
return new InvalidRequest<>(clientId, requestId);
}
Instant timeout;
if (millis == -1) {
timeout = Instant.ofEpochMilli(Long.MAX_VALUE);
} else {
timeout = Instant.ofEpochMilli(millis);
}
@SuppressWarnings("unchecked")
TdApi.Function<T> request = (TdApi.Function<T>) TdApi.Deserializer.deserialize(dais);
return new Request<>(clientId, requestId, request, timeout);
}
} catch (UnsupportedOperationException | IOException e) {
throw new SerializationException(e);
}
}
}

View File

@ -0,0 +1,52 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;
public class TdlibRequestSerializer<T extends TdApi.Object> implements Serializer<OnRequest<T>> {
private static final Instant INFINITE_TIMEOUT = Instant.now().plus(100_000, ChronoUnit.DAYS);
@Override
public byte[] serialize(String topic, OnRequest<T> data) {
try {
if (data == null) {
return new byte[0];
} else {
try (var baos = new ByteArrayOutputStream()) {
try (var daos = new DataOutputStream(baos)) {
daos.writeLong(data.clientId());
daos.writeLong(data.requestId());
daos.writeInt(SERIAL_VERSION);
if (data instanceof OnRequest.Request<?> request) {
if (request.timeout() == Instant.MAX || request.timeout().compareTo(INFINITE_TIMEOUT) >= 0) {
daos.writeLong(-1);
} else {
daos.writeLong(request.timeout().toEpochMilli());
}
request.request().serialize(daos);
} else if (data instanceof OnRequest.InvalidRequest<?>) {
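// -2 in the timeout slot marks a serialized InvalidRequest; no payload follows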
daos.writeLong(-2);
} else {
throw new SerializationException("Unknown request type: " + daos.getClass());
}
daos.flush();
return baos.toByteArray();
}
}
}
} catch (IOException e) {
throw new SerializationException("Failed to serialize TDLib object", e);
}
}
}
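The serializer and deserializer agree on the wire layout [clientId:long][requestId:long][SERIAL_VERSION:int][timeoutMillis:long][TdApi.Function bytes]. A hypothetical round trip, with an illustrative topic name that both sides ignore:
package it.tdlight.reactiveapi;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnRequest;
import it.tdlight.reactiveapi.Event.OnRequest.Request;
import java.time.Instant;
public class RequestRoundTripExample {
  public static void main(String[] args) {
    var serializer = new TdlibRequestSerializer<TdApi.User>();
    var deserializer = new TdlibRequestDeserializer<TdApi.User>();
    var original = new Request<>(1L, 42L, new TdApi.GetMe(), Instant.now().plusSeconds(60));
    byte[] wire = serializer.serialize("tdlib.request.123456", original);
    OnRequest<TdApi.User> decoded = deserializer.deserialize("tdlib.request.123456", wire);
    System.out.println(decoded); // a Request with the same clientId, requestId and timeout millis
  }
}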

View File

@ -0,0 +1,41 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnResponse;
import it.tdlight.reactiveapi.Event.OnResponse.InvalidResponse;
import it.tdlight.reactiveapi.Event.OnResponse.Response;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
public class TdlibResponseDeserializer<T extends TdApi.Object> implements Deserializer<OnResponse<T>> {
@Override
public OnResponse<T> deserialize(String topic, byte[] data) {
if (data == null || data.length == 0) {
return null;
}
try {
var bais = new ByteArrayInputStream(data);
var dais = new DataInputStream(bais);
var clientId = dais.readLong();
var requestId = dais.readLong();
var userId = dais.readLong();
if (dais.readInt() != SERIAL_VERSION) {
// Deprecated response
return new InvalidResponse<>(clientId, requestId, userId);
} else {
@SuppressWarnings("unchecked")
T response = (T) TdApi.Deserializer.deserialize(dais);
return new Response<>(clientId, requestId, userId, response);
}
} catch (UnsupportedOperationException | IOException e) {
throw new SerializationException(e);
}
}
}

View File

@ -0,0 +1,46 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import it.tdlight.reactiveapi.Event.OnResponse;
import it.tdlight.reactiveapi.Event.OnResponse.Response;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;
public class TdlibResponseSerializer<T extends TdApi.Object> implements Serializer<OnResponse<T>> {
@Override
public byte[] serialize(String topic, OnResponse<T> data) {
try {
if (data == null) {
return new byte[0];
} else {
try (var baos = new ByteArrayOutputStream()) {
try (var daos = new DataOutputStream(baos)) {
daos.writeLong(data.clientId());
daos.writeLong(data.requestId());
daos.writeLong(data.userId());
daos.writeInt(SERIAL_VERSION);
if (data instanceof Response<?> response) {
response.response().serialize(daos);
} else if (data instanceof OnResponse.InvalidResponse<T>) {
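// -2 written in place of the response payload to mark an invalid response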
daos.writeLong(-2);
} else {
throw new SerializationException("Unknown response type: " + daos.getClass());
}
daos.flush();
return baos.toByteArray();
}
}
}
} catch (IOException e) {
throw new SerializationException("Failed to serialize TDLib object", e);
}
}
}

View File

@ -0,0 +1,37 @@
package it.tdlight.reactiveapi;
import static it.tdlight.reactiveapi.Event.SERIAL_VERSION;
import it.tdlight.jni.TdApi;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;
public class TdlibSerializer implements Serializer<TdApi.Object> {
@Override
public byte[] serialize(String topic, TdApi.Object data) {
try {
if (data == null) {
return new byte[0];
} else {
try (var baos = new ByteArrayOutputStream()) {
try (var daos = new DataOutputStream(baos)) {
daos.writeInt(SERIAL_VERSION);
data.serialize(daos);
daos.flush();
return baos.toByteArray();
}
}
}
} catch (IOException e) {
throw new SerializationException("Failed to serialize TDLib object", e);
}
}
}
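A round-trip sketch for plain TdApi objects: the serializer prefixes SERIAL_VERSION, and the matching TdlibDeserializer answers a mismatched prefix with TdApi.Error(400, ...) instead of throwing. The topic name is illustrative:
package it.tdlight.reactiveapi;
import it.tdlight.jni.TdApi;
public class ObjectRoundTripExample {
  public static void main(String[] args) {
    byte[] wire = new TdlibSerializer().serialize("any-topic", new TdApi.Ok());
    TdApi.Object back = new TdlibDeserializer().deserialize("any-topic", wire);
    System.out.println(back); // TdApi.Ok when the versions match
  }
}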

View File

@ -0,0 +1,3 @@
package it.tdlight.reactiveapi;
public record Timestamped<T>(long timestamp, T data) {}

View File

@ -1,5 +0,0 @@
package it.tdlight.reactiveapi;
import it.tdlight.reactiveapi.Event.ClientBoundEvent;
public record TimestampedClientBoundEvent(long timestamp, ClientBoundEvent event) {}

View File

@ -1,14 +1,40 @@
package it.tdlight.reactiveapi;
import java.util.Objects;
public class UserTopic {
private final String value;
public UserTopic(long userId) {
value = "tdlib.event.%d".formatted(userId);
public UserTopic(KafkaChannelName channelName, long userId) {
value = "tdlib.%s.%d".formatted(channelName.getKafkaName(), userId);
}
public String getTopic() {
return value;
}
@Override
public String toString() {
return value;
}
@Override
public int hashCode() {
return value.hashCode();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UserTopic userTopic = (UserTopic) o;
return Objects.equals(value, userTopic.value);
}
}

View File

@ -29,7 +29,7 @@ public class EnableMinithumbnails implements ResultingEventTransformer {
resultingEvent.add(event);
// Enable minithumbnails
resultingEvent.add(new TDLibBoundResultingEvent<>(new TdApi.SetOption("disable_minithumbnails",
new OptionValueBoolean(false))));
new OptionValueBoolean(false)), true));
return resultingEvent;
} else {
// Return just the intercepted event as-is

View File

@ -26,8 +26,8 @@ public class LoadChats implements ResultingEventTransformer {
&& onUpdate.update() instanceof TdApi.UpdateAuthorizationState authorizationState
&& authorizationState.authorizationState instanceof TdApi.AuthorizationStateReady) {
return List.of(event,
new TDLibBoundResultingEvent<>(new TdApi.LoadChats(new ChatListMain(), 500)),
new TDLibBoundResultingEvent<>(new TdApi.LoadChats(new ChatListArchive(), 500))
new TDLibBoundResultingEvent<>(new TdApi.LoadChats(new ChatListMain(), 500), true),
new TDLibBoundResultingEvent<>(new TdApi.LoadChats(new ChatListArchive(), 500), true)
);
}

View File

@ -4,11 +4,7 @@ module tdlib.reactive.api {
requires com.fasterxml.jackson.annotation;
requires org.jetbrains.annotations;
requires org.slf4j;
requires atomix.cluster;
requires tdlight.java;
requires reactor.core;
requires atomix;
requires org.apache.commons.lang3;
requires org.reactivestreams;
requires tdlight.api;
requires com.google.common;
@ -16,17 +12,14 @@ module tdlib.reactive.api {
requires kafka.clients;
requires org.apache.logging.log4j;
requires reactor.kafka;
requires atomix.raft;
requires atomix.utils;
requires com.fasterxml.jackson.databind;
requires com.fasterxml.jackson.dataformat.yaml;
requires static io.soabase.recordbuilder.core;
requires kryo;
requires atomix.primitive;
requires java.compiler;
requires it.unimi.dsi.fastutil;
requires net.minecrell.terminalconsole;
requires org.jline.reader;
requires jdk.unsupported;
requires jakarta.xml.bind;
requires reactor.core;
}

View File

@ -14,11 +14,9 @@
<AsyncLogger name="reactor.kafka" level="WARN" additivity="false"/>
<AsyncLogger name="org.apache.kafka" level="WARN" additivity="false"/>
<!-- log only INFO, WARN, ERROR and FATAL logging by classes in this package -->
<Logger name="io.atomix" level="INFO" additivity="false"/>
<!-- log only INFO, WARN, ERROR and FATAL logging by classes in this package -->
<Logger name="io.netty" level="INFO" additivity="false"/>
<Root level="INFO">
<Root level="DEBUG">
<filters>
<MarkerFilter marker="NETWORK_PACKETS" onMatch="DENY" onMismatch="NEUTRAL"/>
</filters>