Quick/Slow response mode, no acks, filtering instead of grouping

Andrea Cavalli 2022-09-10 23:12:35 +02:00
parent bd463a74d2
commit 83613b2d01
7 changed files with 52 additions and 41 deletions

KafkaClientBoundConsumer.java

@@ -13,4 +13,9 @@ public class KafkaClientBoundConsumer extends KafkaConsumer<ClientBoundEvent> {
 		return KafkaChannelName.CLIENT_BOUND_EVENT;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return false;
+	}
+
 }

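The isQuickResponse() override above is the heart of this commit: KafkaConsumer (next file) declares it as a new abstract method and uses it to pick one of two consumer tuning profiles. Client-bound events stay on the slow, batched path, while the tdlib request/response consumers (last two files) opt into the quick path. A minimal sketch of the new contract, with a hypothetical channel and event type:

	// Hypothetical subclass illustrating the contract every consumer now fulfils.
	public class KafkaExampleConsumer extends KafkaConsumer<ExampleEvent> {

		@Override
		public KafkaChannelName getChannelName() {
			return KafkaChannelName.EXAMPLE_CHANNEL; // hypothetical channel
		}

		@Override
		public boolean isQuickResponse() {
			// true  -> low-latency profile: short heartbeat/session timeouts, unbatched fetches
			// false -> throughput profile: batched 1 MiB fetches, longer timeouts
			return true;
		}
	}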
KafkaConsumer.java

@@ -49,15 +49,16 @@ public abstract class KafkaConsumer<K> {
 		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, getChannelName().getDeserializerClass());
 		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
 		props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, toIntExact(Duration.ofMinutes(5).toMillis()));
-		props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
-		props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
-		props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
-		ReceiverOptions<Integer, K> receiverOptions = ReceiverOptions
-				.<Integer, K>create(props)
-				.commitInterval(Duration.ofSeconds(10))
-				.commitBatchSize(65535)
-				.maxCommitAttempts(100)
-				.maxDeferredCommits(100);
+		if (isQuickResponse()) {
+			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
+			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
+		} else {
+			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
+			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
+			props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
+			props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
+		}
+		ReceiverOptions<Integer, K> receiverOptions = ReceiverOptions.create(props);
 		Pattern pattern;
 		if (userId == null) {
 			pattern = Pattern.compile("tdlib\\." + getChannelName() + "\\.\\d+");
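The branch above trades latency for throughput with standard Kafka consumer settings. In slow mode the broker holds each fetch until fetch.min.bytes (1 MiB) are available or fetch.max.wait.ms (100 ms) elapse, so records arrive batched but up to 100 ms late; quick mode omits both, falling back to the broker default fetch.min.bytes = 1, so a fetch returns as soon as any data exists, and its shorter heartbeat and session.timeout.ms let the group evict a dead consumer faster. The plain ReceiverOptions.create(props) also drops the old explicit commit tuning (commitInterval, commitBatchSize, maxDeferredCommits), reverting to reactor-kafka's defaults; together with the acknowledgement change below, offset commits effectively stop mattering. A sketch of the two profiles, assuming a hypothetical buildProps(boolean) helper mirroring the branch:

	static Map<String, Object> buildProps(boolean quickResponse) {
		Map<String, Object> props = new HashMap<>();
		if (quickResponse) {
			// react fast: evict a dead consumer after 10 s, fetch as soon as any byte is ready
			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
		} else {
			// favour throughput: tolerate 30 s pauses, let the broker batch up to 1 MiB per fetch
			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
			props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
			props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
		}
		return props;
	}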
@@ -73,6 +74,8 @@ public abstract class KafkaConsumer<K> {
 
 	public abstract KafkaChannelName getChannelName();
 
+	public abstract boolean isQuickResponse();
+
 	protected Flux<Timestamped<K>> retryIfCleanup(Flux<Timestamped<K>> eventFlux) {
 		return eventFlux.retryWhen(Retry
 				.backoff(Long.MAX_VALUE, Duration.ofMillis(100))
@@ -114,7 +117,7 @@ public abstract class KafkaConsumer<K> {
 						SignalType.ON_ERROR,
 						SignalType.ON_COMPLETE
 				)
-				.doOnNext(result -> result.receiverOffset().acknowledge())
+				//.doOnNext(result -> result.receiverOffset().acknowledge())
 				.map(record -> {
 					if (record.timestampType() == TimestampType.CREATE_TIME) {
 						return new Timestamped<>(record.timestamp(), record.value());

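Commenting out acknowledge() completes the shift to at-most-once delivery: offsets are never marked for commit, and with auto.offset.reset = latest a restarted or rebalanced consumer resumes from the tail of the topic rather than replaying a backlog. If at-least-once delivery were ever needed again, the acknowledgement would have to return, roughly like this (sketch; receiverOptions, pattern and Timestamped as defined in this file):

	KafkaReceiver.create(receiverOptions.subscription(pattern))
			.receive()
			// mark each record processed so reactor-kafka commits its offset periodically
			.doOnNext(record -> record.receiverOffset().acknowledge())
			.map(record -> new Timestamped<>(record.timestamp(), record.value()));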
KafkaProducer.java

@@ -26,7 +26,8 @@ public abstract class KafkaProducer<K> {
 		Map<String, Object> props = new HashMap<>();
 		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaParameters.bootstrapServers());
 		props.put(ProducerConfig.CLIENT_ID_CONFIG, kafkaParameters.clientId());
-		props.put(ProducerConfig.ACKS_CONFIG, "1");
+		//props.put(ProducerConfig.ACKS_CONFIG, "1");
+		props.put(ProducerConfig.ACKS_CONFIG, "0");
 		props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
 		props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
 		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);

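acks controls how many broker acknowledgements the producer waits for before a send counts as complete, so this is the "no acks" part of the commit title: publishing becomes fire-and-forget, shaving a network round-trip off every send, but a record lost to a broker failure now disappears silently. The standard Kafka levels, for reference:

	props.put(ProducerConfig.ACKS_CONFIG, "0");   // fire-and-forget: no broker response awaited (this commit)
	//props.put(ProducerConfig.ACKS_CONFIG, "1");   // leader has written the record (previous value)
	//props.put(ProducerConfig.ACKS_CONFIG, "all"); // all in-sync replicas have written the record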
KafkaSharedTdlibClients.java

@@ -15,6 +15,7 @@ import java.util.logging.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import reactor.core.Disposable;
+import reactor.core.publisher.BufferOverflowStrategy;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.SignalType;
@@ -31,23 +32,18 @@ public class KafkaSharedTdlibClients implements Closeable {
 	private final AtomicReference<Disposable> responsesSub = new AtomicReference<>();
 	private final Disposable requestsSub;
 	private final AtomicReference<Disposable> eventsSub = new AtomicReference<>();
-	private final Flux<GroupedFlux<Long, Timestamped<OnResponse<Object>>>> responses;
-	private final Flux<GroupedFlux<Long, Timestamped<ClientBoundEvent>>> events;
+	private final Flux<Timestamped<OnResponse<Object>>> responses;
+	private final Flux<Timestamped<ClientBoundEvent>> events;
 	private final Many<OnRequest<?>> requests = Sinks.many().unicast()
 			.onBackpressureBuffer(Queues.<OnRequest<?>>get(65535).get());
 
 	public KafkaSharedTdlibClients(KafkaTdlibClientsChannels kafkaTdlibClientsChannels) {
 		this.kafkaTdlibClientsChannels = kafkaTdlibClientsChannels;
 		this.responses = kafkaTdlibClientsChannels.response().consumeMessages("td-responses")
-				.onBackpressureBuffer()
-				.groupBy(k1 -> k1.data().clientId(), 1)
-				.replay()
+				.publish()
 				.autoConnect(1, this.responsesSub::set);
 		this.events = kafkaTdlibClientsChannels.events().consumeMessages("td-handler")
-				.onBackpressureBuffer()
-				.groupBy(k -> k.data().userId(), 1)
-				.doOnNext(g -> LOG.info("Receiving updates of client: {}", g.key()))
-				.replay()
+				.publish()
 				.autoConnect(1, this.eventsSub::set);
 		this.requestsSub = kafkaTdlibClientsChannels.request()
 				.sendMessages(0L, requests.asFlux())
@@ -56,20 +52,18 @@ public class KafkaSharedTdlibClients implements Closeable {
 	}
 
 	public Flux<Timestamped<OnResponse<Object>>> responses(long clientId) {
-		return responses.filter(group -> group.key() == clientId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.log("req-" + clientId, Level.FINE, SignalType.REQUEST);
+		return responses
+				.filter(group -> group.data().clientId() == clientId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.log("req-" + clientId, Level.FINEST, SignalType.REQUEST);
 	}
 
 	public Flux<Timestamped<ClientBoundEvent>> events(long userId) {
-		return events.filter(group -> group.key() == userId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.doOnSubscribe(s -> LOG.info("Reading updates of client: {}", userId));
-				//.log("event-" + userId, Level.FINE, SignalType.REQUEST);
+		return events
+				.filter(group -> group.data().userId() == userId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.doOnSubscribe(s -> LOG.info("Reading updates of client: {}", userId))
+				.log("event-" + userId, Level.FINEST, SignalType.REQUEST);
 	}
 
 	public Many<OnRequest<?>> requests() {
public Many<OnRequest<?>> requests() {

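This file implements the "filtering instead of grouping" part of the commit title. Previously each client claimed its own GroupedFlux via groupBy(..., 1); with a prefetch of 1 and replay(), a group that nobody subscribed to could stall the shared upstream. Now publish().autoConnect(1, ...) multicasts the raw stream and each client simply filters out its own records, so no single client can block the others; the cost is that every subscriber sees (and discards) every message. A minimal self-contained sketch of the pattern, with a hypothetical Message record standing in for Timestamped<...>:

	import java.util.concurrent.atomic.AtomicReference;
	import reactor.core.Disposable;
	import reactor.core.publisher.Flux;

	record Message(long clientId, String payload) {} // hypothetical stand-in

	class FilterDemux {
		private final AtomicReference<Disposable> sub = new AtomicReference<>();
		private final Flux<Message> shared;

		FilterDemux(Flux<Message> upstream) {
			// hot multicast: starts consuming on the first subscriber, shared by all
			this.shared = upstream.publish().autoConnect(1, sub::set);
		}

		// every client reads the same stream and keeps only its own records
		Flux<Message> messages(long clientId) {
			return shared.filter(m -> m.clientId() == clientId);
		}
	}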
KafkaSharedTdlibServers.java

@@ -14,6 +14,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 import java.util.logging.Level;
 import reactor.core.Disposable;
+import reactor.core.publisher.BufferOverflowStrategy;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.SignalType;
@@ -29,28 +30,25 @@ public class KafkaSharedTdlibServers implements Closeable {
 	private final AtomicReference<Disposable> requestsSub = new AtomicReference<>();
 	private final Many<OnResponse<TdApi.Object>> responses = Sinks.many().unicast().onBackpressureBuffer(
 			Queues.<OnResponse<TdApi.Object>>get(65535).get());
-	private final Flux<GroupedFlux<Long, Timestamped<OnRequest<Object>>>> requests;
+	private final Flux<Timestamped<OnRequest<Object>>> requests;
 
 	public KafkaSharedTdlibServers(KafkaTdlibServersChannels kafkaTdlibServersChannels) {
 		this.kafkaTdlibServersChannels = kafkaTdlibServersChannels;
 		this.responsesSub = kafkaTdlibServersChannels.response()
-				.sendMessages(0L, responses.asFlux())
+				.sendMessages(0L, responses.asFlux().log("responses", Level.FINEST, SignalType.ON_NEXT))
 				.subscribeOn(Schedulers.parallel())
 				.subscribe();
 		this.requests = kafkaTdlibServersChannels.request()
 				.consumeMessages("td-requests")
-				.onBackpressureBuffer()
-				.groupBy(k -> k.data().userId(), 1)
-				.replay()
+				.publish()
 				.autoConnect(1, this.requestsSub::set);
 	}
 
 	public Flux<Timestamped<OnRequest<Object>>> requests(long userId) {
-		return requests.filter(group -> group.key() == userId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.log("req-" + userId, Level.FINE, SignalType.REQUEST, SignalType.ON_NEXT);
+		return requests
+				.filter(group -> group.data().userId() == userId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.log("requests-" + userId, Level.FINEST, SignalType.REQUEST, SignalType.ON_NEXT);
 	}
 
 	public Disposable events(Flux<ClientBoundEvent> eventFlux) {
public Disposable events(Flux<ClientBoundEvent> eventFlux) {

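The newly imported BufferOverflowStrategy is used only by the commented-out onBackpressureBuffer(8192, DROP_OLDEST) lines, which mark the known weak spot of the filtering design: on a shared publish() flux, one slow subscriber still backpressures the common upstream and can hold every other client back. Enabling the operator would decouple them by buffering up to 8192 records per subscriber and then discarding the oldest, trading loss for liveness, consistent with the acks=0 choice above. If switched on, the chain would read (sketch):

	return requests
			.filter(r -> r.data().userId() == userId)
			// absorb a slow subscriber: keep at most 8192 records, drop the oldest beyond that
			.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST);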
KafkaTdlibRequestConsumer.java

@@ -16,4 +16,9 @@ public class KafkaTdlibRequestConsumer extends KafkaConsumer<OnRequest<TdApi.Object>> {
 		return KafkaChannelName.TDLIB_REQUEST;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return true;
+	}
+
 }

KafkaTdlibResponseConsumer.java

@@ -15,4 +15,9 @@ public class KafkaTdlibResponseConsumer extends KafkaConsumer<OnResponse<TdApi.Object>> {
 		return KafkaChannelName.TDLIB_RESPONSE;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return true;
+	}
+
 }