Quick/Slow response mode, no acks, filtering instead of grouping

Andrea Cavalli 2022-09-10 23:12:35 +02:00
parent bd463a74d2
commit 83613b2d01
7 changed files with 52 additions and 41 deletions

KafkaClientBoundConsumer.java

@@ -13,4 +13,9 @@ public class KafkaClientBoundConsumer extends KafkaConsumer<ClientBoundEvent> {
 		return KafkaChannelName.CLIENT_BOUND_EVENT;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return false;
+	}
+
 }

KafkaConsumer.java

@@ -49,15 +49,16 @@ public abstract class KafkaConsumer<K> {
 		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, getChannelName().getDeserializerClass());
 		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
 		props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, toIntExact(Duration.ofMinutes(5).toMillis()));
-		props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
-		props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
-		props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
-		ReceiverOptions<Integer, K> receiverOptions = ReceiverOptions
-				.<Integer, K>create(props)
-				.commitInterval(Duration.ofSeconds(10))
-				.commitBatchSize(65535)
-				.maxCommitAttempts(100)
-				.maxDeferredCommits(100);
+		if (isQuickResponse()) {
+			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
+			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
+		} else {
+			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
+			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
+			props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
+			props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
+		}
+		ReceiverOptions<Integer, K> receiverOptions = ReceiverOptions.create(props);
 		Pattern pattern;
 		if (userId == null) {
 			pattern = Pattern.compile("tdlib\\." + getChannelName() + "\\.\\d+");

@@ -73,6 +74,8 @@ public abstract class KafkaConsumer<K> {
 	public abstract KafkaChannelName getChannelName();
 
+	public abstract boolean isQuickResponse();
+
 	protected Flux<Timestamped<K>> retryIfCleanup(Flux<Timestamped<K>> eventFlux) {
 		return eventFlux.retryWhen(Retry
 				.backoff(Long.MAX_VALUE, Duration.ofMillis(100))

@@ -114,7 +117,7 @@ public abstract class KafkaConsumer<K> {
 						SignalType.ON_ERROR,
 						SignalType.ON_COMPLETE
 				)
-				.doOnNext(result -> result.receiverOffset().acknowledge())
+				//.doOnNext(result -> result.receiverOffset().acknowledge())
 				.map(record -> {
 					if (record.timestampType() == TimestampType.CREATE_TIME) {
 						return new Timestamped<>(record.timestamp(), record.value());
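For reference, a standalone sketch (not part of this commit, class and method names are illustrative) of what the two branches above configure, using only plain Kafka client constants:

	import java.util.HashMap;
	import java.util.Map;
	import org.apache.kafka.clients.consumer.ConsumerConfig;

	// Illustrative only: the same settings as the hunk above, annotated with what each controls.
	public class QuickSlowConsumerProps {

		static Map<String, Object> quickProps() {
			Map<String, Object> props = new HashMap<>();
			// Quick-response channels: detect dead consumers fast (5 s heartbeat,
			// 10 s session timeout) and leave fetch.min.bytes at its default of 1,
			// so the broker answers every fetch immediately.
			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
			return props;
		}

		static Map<String, Object> slowProps() {
			Map<String, Object> props = new HashMap<>();
			// Slow channels: relaxed liveness checks plus batched fetches; the broker
			// waits for up to 1 MiB of data or 100 ms before answering a fetch,
			// trading a little latency for throughput.
			props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
			props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
			props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1048576");
			props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100");
			return props;
		}
	}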

KafkaProducer.java

@@ -26,7 +26,8 @@ public abstract class KafkaProducer<K> {
 		Map<String, Object> props = new HashMap<>();
 		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaParameters.bootstrapServers());
 		props.put(ProducerConfig.CLIENT_ID_CONFIG, kafkaParameters.clientId());
-		props.put(ProducerConfig.ACKS_CONFIG, "1");
+		//props.put(ProducerConfig.ACKS_CONFIG, "1");
+		props.put(ProducerConfig.ACKS_CONFIG, "0");
 		props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
 		props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
 		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
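The producer-side half of "no acks", as a minimal sketch (illustrative, not the project's producer class) of what the acks values trade off:

	import java.util.HashMap;
	import java.util.Map;
	import org.apache.kafka.clients.producer.ProducerConfig;

	// Illustrative only: durability versus latency behind the acks setting.
	public class AcksSketch {

		static Map<String, Object> fireAndForgetProps() {
			Map<String, Object> props = new HashMap<>();
			// "0"  : the producer never waits for a broker acknowledgement;
			//        lowest latency, but a failed send is silently lost.
			// "1"  : the partition leader must persist the record before the send completes.
			// "all": every in-sync replica must persist it (strongest guarantee, slowest).
			props.put(ProducerConfig.ACKS_CONFIG, "0");
			return props;
		}
	}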

KafkaSharedTdlibClients.java

@@ -15,6 +15,7 @@ import java.util.logging.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import reactor.core.Disposable;
+import reactor.core.publisher.BufferOverflowStrategy;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.SignalType;

@@ -31,23 +32,18 @@ public class KafkaSharedTdlibClients implements Closeable {
 	private final AtomicReference<Disposable> responsesSub = new AtomicReference<>();
 	private final Disposable requestsSub;
 	private final AtomicReference<Disposable> eventsSub = new AtomicReference<>();
-	private final Flux<GroupedFlux<Long, Timestamped<OnResponse<Object>>>> responses;
-	private final Flux<GroupedFlux<Long, Timestamped<ClientBoundEvent>>> events;
+	private final Flux<Timestamped<OnResponse<Object>>> responses;
+	private final Flux<Timestamped<ClientBoundEvent>> events;
 	private final Many<OnRequest<?>> requests = Sinks.many().unicast()
 			.onBackpressureBuffer(Queues.<OnRequest<?>>get(65535).get());
 
 	public KafkaSharedTdlibClients(KafkaTdlibClientsChannels kafkaTdlibClientsChannels) {
 		this.kafkaTdlibClientsChannels = kafkaTdlibClientsChannels;
 		this.responses = kafkaTdlibClientsChannels.response().consumeMessages("td-responses")
-				.onBackpressureBuffer()
-				.groupBy(k1 -> k1.data().clientId(), 1)
-				.replay()
+				.publish()
 				.autoConnect(1, this.responsesSub::set);
 		this.events = kafkaTdlibClientsChannels.events().consumeMessages("td-handler")
-				.onBackpressureBuffer()
-				.groupBy(k -> k.data().userId(), 1)
-				.doOnNext(g -> LOG.info("Receiving updates of client: {}", g.key()))
-				.replay()
+				.publish()
 				.autoConnect(1, this.eventsSub::set);
 		this.requestsSub = kafkaTdlibClientsChannels.request()
 				.sendMessages(0L, requests.asFlux())

@@ -56,20 +52,18 @@ public class KafkaSharedTdlibClients implements Closeable {
 	}
 
 	public Flux<Timestamped<OnResponse<Object>>> responses(long clientId) {
-		return responses.filter(group -> group.key() == clientId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.log("req-" + clientId, Level.FINE, SignalType.REQUEST);
+		return responses
+				.filter(group -> group.data().clientId() == clientId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.log("req-" + clientId, Level.FINEST, SignalType.REQUEST);
 	}
 
 	public Flux<Timestamped<ClientBoundEvent>> events(long userId) {
-		return events.filter(group -> group.key() == userId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.doOnSubscribe(s -> LOG.info("Reading updates of client: {}", userId));
-				//.log("event-" + userId, Level.FINE, SignalType.REQUEST);
+		return events
+				.filter(group -> group.data().userId() == userId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.doOnSubscribe(s -> LOG.info("Reading updates of client: {}", userId))
+				.log("event-" + userId, Level.FINEST, SignalType.REQUEST);
 	}
 
 	public Many<OnRequest<?>> requests() {
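The "filtering instead of grouping" idea in isolation: a minimal Reactor sketch (hypothetical Event type and client ids, not the project's classes) showing the old per-key groupBy pipeline next to the new shared publish()/filter() pipeline.

	import reactor.core.publisher.Flux;

	// Illustrative only: grouping versus filtering a shared event stream.
	public class GroupVsFilterSketch {

		record Event(long clientId, String payload) {}

		public static void main(String[] args) {
			Flux<Event> source = Flux.just(
					new Event(1L, "a"),
					new Event(2L, "b"),
					new Event(1L, "c"));

			// Before: one GroupedFlux per clientId; every caller has to locate its
			// group, and groups for clients that never subscribe still must be drained.
			source.groupBy(Event::clientId, 1)
					.flatMap(group -> group.map(e -> group.key() + " -> " + e.payload()))
					.subscribe(System.out::println);

			// After: a single shared hot Flux; each client simply filters by its own id.
			Flux<Event> shared = source.publish().autoConnect(1);
			shared.filter(e -> e.clientId() == 1L)
					.subscribe(e -> System.out.println("client 1 -> " + e.payload()));
		}
	}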

KafkaSharedTdlibServers.java

@@ -14,6 +14,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 import java.util.logging.Level;
 import reactor.core.Disposable;
+import reactor.core.publisher.BufferOverflowStrategy;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.SignalType;

@@ -29,28 +30,25 @@ public class KafkaSharedTdlibServers implements Closeable {
 	private final AtomicReference<Disposable> requestsSub = new AtomicReference<>();
 	private final Many<OnResponse<TdApi.Object>> responses = Sinks.many().unicast().onBackpressureBuffer(
 			Queues.<OnResponse<TdApi.Object>>get(65535).get());
-	private final Flux<GroupedFlux<Long, Timestamped<OnRequest<Object>>>> requests;
+	private final Flux<Timestamped<OnRequest<Object>>> requests;
 
 	public KafkaSharedTdlibServers(KafkaTdlibServersChannels kafkaTdlibServersChannels) {
 		this.kafkaTdlibServersChannels = kafkaTdlibServersChannels;
 		this.responsesSub = kafkaTdlibServersChannels.response()
-				.sendMessages(0L, responses.asFlux())
+				.sendMessages(0L, responses.asFlux().log("responses", Level.FINEST, SignalType.ON_NEXT))
 				.subscribeOn(Schedulers.parallel())
 				.subscribe();
 		this.requests = kafkaTdlibServersChannels.request()
 				.consumeMessages("td-requests")
-				.onBackpressureBuffer()
-				.groupBy(k -> k.data().userId(), 1)
-				.replay()
+				.publish()
 				.autoConnect(1, this.requestsSub::set);
 	}
 
 	public Flux<Timestamped<OnRequest<Object>>> requests(long userId) {
-		return requests.filter(group -> group.key() == userId)
-				.take(1, true)
-				.singleOrEmpty()
-				.flatMapMany(Function.identity())
-				.log("req-" + userId, Level.FINE, SignalType.REQUEST, SignalType.ON_NEXT);
+		return requests
+				.filter(group -> group.data().userId() == userId)
+				//.onBackpressureBuffer(8192, BufferOverflowStrategy.DROP_OLDEST)
+				.log("requests-" + userId, Level.FINEST, SignalType.REQUEST, SignalType.ON_NEXT);
 	}
 
 	public Disposable events(Flux<ClientBoundEvent> eventFlux) {

KafkaTdlibRequestConsumer.java

@@ -16,4 +16,9 @@ public class KafkaTdlibRequestConsumer extends KafkaConsumer<OnRequest<TdApi.Object>> {
 		return KafkaChannelName.TDLIB_REQUEST;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return true;
+	}
+
 }

KafkaTdlibResponseConsumer.java

@@ -15,4 +15,9 @@ public class KafkaTdlibResponseConsumer extends KafkaConsumer<OnResponse<TdApi.Object>> {
 		return KafkaChannelName.TDLIB_RESPONSE;
 	}
 
+	@Override
+	public boolean isQuickResponse() {
+		return true;
+	}
+
 }