Fix out-of-order updates

This commit is contained in:
Andrea Cavalli 2021-01-25 03:36:15 +01:00
parent 620914b3cf
commit 5074d985b3
8 changed files with 31 additions and 30 deletions

View File

@@ -216,8 +216,7 @@
<version>3.8.1</version> <version>3.8.1</version>
<configuration> <configuration>
<release>11</release> <release>11</release>
<source>11</source> <useIncrementalCompilation>false</useIncrementalCompilation>
<target>11</target>
</configuration> </configuration>
</plugin> </plugin>
<plugin> <plugin>

View File

@@ -95,7 +95,7 @@ public class EventBusFlux {
}) })
.then(Mono.empty()) .then(Mono.empty())
) )
.flatMap(item -> Mono.<Message<T>>create(itemSink -> { .flatMapSequential(item -> Mono.<Message<T>>create(itemSink -> {
var responseHandler = MonoUtils.toHandler(itemSink); var responseHandler = MonoUtils.toHandler(itemSink);
eventBus.request(subscriptionAddress + ".signal", SignalMessage.<T>onNext(item), signalDeliveryOptions, responseHandler); eventBus.request(subscriptionAddress + ".signal", SignalMessage.<T>onNext(item), signalDeliveryOptions, responseHandler);
})) }))
@@ -292,7 +292,7 @@ public class EventBusFlux {
} }
}); });
var pingSubscription = Flux.interval(Duration.ofSeconds(10)).flatMap(n -> Mono.create(pingSink -> var pingSubscription = Flux.interval(Duration.ofSeconds(10)).flatMapSequential(n -> Mono.create(pingSink ->
eventBus.<byte[]>request(subscriptionAddress + ".ping", EMPTY, deliveryOptions, pingMsg -> { eventBus.<byte[]>request(subscriptionAddress + ".ping", EMPTY, deliveryOptions, pingMsg -> {
if (pingMsg.succeeded()) { if (pingMsg.succeeded()) {
pingSink.success(pingMsg.result().body()); pingSink.success(pingMsg.result().body());

View File

@@ -165,19 +165,21 @@ public class TDLibRemoteClient implements AutoCloseable {
var mediaPath = getMediaDirectory(req.id()); var mediaPath = getMediaDirectory(req.id());
var blPath = getSessionBinlogDirectory(req.id()); var blPath = getSessionBinlogDirectory(req.id());
BinlogUtils Schedulers.boundedElastic().schedule(() -> {
.chooseBinlog(clusterManager.getVertx().fileSystem(), blPath, req.binlog(), req.binlogDate()) BinlogUtils
.then(BinlogUtils.cleanSessionPath(clusterManager.getVertx().fileSystem(), blPath, sessPath, mediaPath)) .chooseBinlog(clusterManager.getVertx().fileSystem(), blPath, req.binlog(), req.binlogDate())
.then(clusterManager.getVertx().rxDeployVerticle(verticle, deploymentOptions).as(MonoUtils::toMono)) .then(BinlogUtils.cleanSessionPath(clusterManager.getVertx().fileSystem(), blPath, sessPath, mediaPath))
.subscribeOn(Schedulers.single()) .then(clusterManager.getVertx().rxDeployVerticle(verticle, deploymentOptions).as(MonoUtils::toMono))
.subscribe( .publishOn(Schedulers.single())
v -> {}, .subscribe(
ex -> { v -> {},
logger.error("Failed to deploy bot verticle", ex); ex -> {
msg.fail(500, "Failed to deploy bot verticle: " + ex.getMessage()); logger.error("Failed to deploy bot verticle", ex);
}, msg.fail(500, "Failed to deploy bot verticle: " + ex.getMessage());
() -> msg.reply(new byte[0]) },
); () -> msg.reply(new byte[0])
);
});
}); });
return startBotConsumer.rxCompletionHandler().as(MonoUtils::toMono); return startBotConsumer.rxCompletionHandler().as(MonoUtils::toMono);
}) })

View File

@@ -75,8 +75,8 @@ public class AsyncTdEasy {
// todo: use Duration.ZERO instead of 10ms interval // todo: use Duration.ZERO instead of 10ms interval
this.incomingUpdates = td.receive() this.incomingUpdates = td.receive()
.flatMap(this::preprocessUpdates) .flatMapSequential(this::preprocessUpdates)
.flatMap(update -> Mono.from(this.getState()).single().map(state -> new AsyncTdUpdateObj(state, update))) .flatMapSequential(update -> Mono.from(this.getState()).single().map(state -> new AsyncTdUpdateObj(state, update)))
.map(upd -> (TdApi.Update) upd.getUpdate()) .map(upd -> (TdApi.Update) upd.getUpdate())
.doOnError(ex -> { .doOnError(ex -> {
if (ex instanceof TdError) { if (ex instanceof TdError) {

View File

@@ -191,8 +191,8 @@ public class AsyncTdMiddleEventBusClient implements AsyncTdMiddle {
.doOnSubscribe(s -> Schedulers.boundedElastic().schedule(() -> { .doOnSubscribe(s -> Schedulers.boundedElastic().schedule(() -> {
cluster.getEventBus().<byte[]>send(botAddress + ".ready-to-receive", EMPTY, deliveryOptionsWithTimeout); cluster.getEventBus().<byte[]>send(botAddress + ".ready-to-receive", EMPTY, deliveryOptionsWithTimeout);
})) }))
.flatMap(updates -> Mono.fromCallable((Callable<Object>) updates::body).publishOn(Schedulers.boundedElastic())) .flatMapSequential(updates -> Mono.fromCallable((Callable<Object>) updates::body).publishOn(Schedulers.boundedElastic()))
.flatMap(updates -> { .flatMapSequential(updates -> {
var result = (TdResultList) updates; var result = (TdResultList) updates;
if (result.succeeded()) { if (result.succeeded()) {
return Flux.fromIterable(result.value()); return Flux.fromIterable(result.value());
@@ -200,7 +200,7 @@ public class AsyncTdMiddleEventBusClient implements AsyncTdMiddle {
return Mono.fromCallable(() -> TdResult.failed(result.error()).orElseThrow()); return Mono.fromCallable(() -> TdResult.failed(result.error()).orElseThrow());
} }
}) })
.flatMap(this::interceptUpdate) .flatMapSequential(this::interceptUpdate)
.doOnError(crash::tryEmitError) .doOnError(crash::tryEmitError)
.doOnTerminate(updatesStreamEnd::tryEmitEmpty) .doOnTerminate(updatesStreamEnd::tryEmitEmpty)
.publishOn(Schedulers.single()); .publishOn(Schedulers.single());

View File

@@ -141,7 +141,7 @@ public class AsyncTdMiddleEventBusServer extends AbstractVerticle {
executeConsumer.handler(sink::next); executeConsumer.handler(sink::next);
executeConsumer.endHandler(h -> sink.complete()); executeConsumer.endHandler(h -> sink.complete());
}) })
.flatMap(msg -> { .flatMapSequential(msg -> {
logger.trace("Received execute request {}", msg.body()); logger.trace("Received execute request {}", msg.body());
var request = overrideRequest(msg.body().getRequest(), botId); var request = overrideRequest(msg.body().getRequest(), botId);
return td return td
@@ -324,7 +324,7 @@ public class AsyncTdMiddleEventBusServer extends AbstractVerticle {
} }
return false; return false;
}) })
.flatMap(update -> Mono.fromCallable(() -> { .flatMapSequential(update -> Mono.fromCallable(() -> {
if (update.getConstructor() == TdApi.Error.CONSTRUCTOR) { if (update.getConstructor() == TdApi.Error.CONSTRUCTOR) {
var error = (Error) update; var error = (Error) update;
throw new TdError(error.code, error.message); throw new TdError(error.code, error.message);
@@ -351,14 +351,14 @@ public class AsyncTdMiddleEventBusServer extends AbstractVerticle {
.sender(botAddress + ".updates", opts); .sender(botAddress + ".updates", opts);
var pipeFlux = updatesFlux var pipeFlux = updatesFlux
.flatMap(updatesList -> updatesSender .flatMapSequential(updatesList -> updatesSender
.rxWrite(updatesList) .rxWrite(updatesList)
.as(MonoUtils::toMono) .as(MonoUtils::toMono)
.thenReturn(updatesList) .thenReturn(updatesList)
) )
.flatMap(updatesList -> Flux .flatMapSequential(updatesList -> Flux
.fromIterable(updatesList.value()) .fromIterable(updatesList.value())
.flatMap(item -> { .flatMapSequential(item -> {
if (item instanceof Update) { if (item instanceof Update) {
var tdUpdate = (Update) item; var tdUpdate = (Update) item;
if (tdUpdate.getConstructor() == UpdateAuthorizationState.CONSTRUCTOR) { if (tdUpdate.getConstructor() == UpdateAuthorizationState.CONSTRUCTOR) {

View File

@@ -117,7 +117,7 @@ public class BinlogUtils {
readBinlogConsumer.handler(sink::next); readBinlogConsumer.handler(sink::next);
readBinlogConsumer.endHandler(h -> sink.complete()); readBinlogConsumer.endHandler(h -> sink.complete());
}) })
.flatMap(req -> BinlogUtils .flatMapSequential(req -> BinlogUtils
.retrieveBinlog(vertx.fileSystem(), TDLibRemoteClient.getSessionBinlogDirectory(botId)) .retrieveBinlog(vertx.fileSystem(), TDLibRemoteClient.getSessionBinlogDirectory(botId))
.flatMap(BinlogAsyncFile::readFullyBytes) .flatMap(BinlogAsyncFile::readFullyBytes)
.single() .single()

View File

@@ -407,7 +407,7 @@ public class MonoUtils {
} }
public Flux<T> readAsFlux() { public Flux<T> readAsFlux() {
return sink.asFlux(); return sink.asFlux().publishOn(Schedulers.parallel());
} }
public ReactiveReactorReadStream<T> readAsStream() { public ReactiveReactorReadStream<T> readAsStream() {
@@ -552,7 +552,7 @@ public class MonoUtils {
} }
public Flux<T> readAsFlux() { public Flux<T> readAsFlux() {
return flux; return flux.publishOn(Schedulers.parallel());
} }
public ReactiveReactorReadStream<T> readAsStream() { public ReactiveReactorReadStream<T> readAsStream() {